| hexsha (string, length 40) | size (int64, 4 to 1.02M) | ext (string, 8 distinct values) | lang (string, 1 distinct value) | max_stars_repo_path (string, length 4 to 209) | max_stars_repo_name (string, length 5 to 121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4 to 209) | max_issues_repo_name (string, length 5 to 121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4 to 209) | max_forks_repo_name (string, length 5 to 121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0671b27e8da87d02a9a00645b2c4b05892cec1db | 1,266 | py | Python | example/start_job.py | MUAS-DTLab-SoSe20-EnBeeMo/EnBeeMo_I | 82240e022d4f9968adfd1e455f7f65db74d81d3b | ["MIT"] | null | null | null | example/start_job.py | MUAS-DTLab-SoSe20-EnBeeMo/EnBeeMo_I | 82240e022d4f9968adfd1e455f7f65db74d81d3b | ["MIT"] | null | null | null | example/start_job.py | MUAS-DTLab-SoSe20-EnBeeMo/EnBeeMo_I | 82240e022d4f9968adfd1e455f7f65db74d81d3b | ["MIT"] | null | null | null |
# This script is an example of how to start a batch of computing jobs with the PCR system.
# Requires a properly set up AWS CLI or other source of credentials.
#
# Usage:
# - copy this script to your machine (optional)
# - install the PCR python package (see readme.md)
# - prepare and push your example docker image
# - replace the placeholders in the parameters below with your endpoints
# - read and comprehend the script
# - run the script
# - wait until finished
# - check results
import json
from PCR.JobSubmitter import JobSubmitter
# infrastructure parameters
containerImage = '<your manually pushed docker image>'
job_queue = '<the aws batch queue - SEE CLOUDFORMATION STACK OUTPUT>'
job_bucket = '<the job bucket name - SEE CLOUDFORMATION STACK OUTPUT>'
batch_name = 'example'
# Initialize job submitter
job_submitter = JobSubmitter(
batch_name=batch_name,
container=containerImage,
job_queue=job_queue,
job_bucket=job_bucket,
)
job_id = job_submitter.submit_job(input_data={
'x': 2,
'y': 3
})
job_submitter.wait_until_jobs_finished()
json_outputs = job_submitter.get_all_outputs()
outputs = []
for json_output in json_outputs:
output = json.loads(json_output)
outputs.append(output)
print(outputs)
| 25.836735 | 87 | 0.741706 |
| 18427aba2ec30094b23066e7961beb84f69ee439 | 1,701 | py | Python | pyti/ichimoku_cloud.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | ["MIT"] | 635 | 2017-04-04T20:24:47.000Z | 2022-03-28T16:00:23.000Z | pyti/ichimoku_cloud.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | ["MIT"] | 24 | 2017-10-22T15:01:54.000Z | 2021-01-30T19:51:00.000Z | pyti/ichimoku_cloud.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | ["MIT"] | 183 | 2017-07-01T16:06:39.000Z | 2022-03-07T23:29:11.000Z |
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
from six.moves import range
def conversion_base_line_helper(data, period):
"""
The only real difference between TenkanSen and KijunSen is the period value
"""
catch_errors.check_for_period_error(data, period)
cblh = [(np.max(data[idx+1-period:idx+1]) +
np.min(data[idx+1-period:idx+1])) / 2 for idx in range(period-1, len(data))]
cblh = fill_for_noncomputable_vals(data, cblh)
return cblh
def tenkansen(data, period=9):
"""
TenkanSen (Conversion Line)
Formula:
(H + L) / 2 :: default period=9
"""
ts = conversion_base_line_helper(data, period)
return ts
def kijunsen(data, period=26):
"""
KijunSen (Base Line)
Formula:
(H + L) / 2 :: default period=26
"""
ks = conversion_base_line_helper(data, period)
return ks
def chiku_span(data):
"""
Chiku Span (Lagging Span)
Formula:
Close shifted back 26 bars
"""
cs = data[25::]
return cs
def senkou_a(data):
"""
Senkou A (Leading Span A)
Formula:
(TenkanSen + KijunSen) / 2 :: Shift Forward 26 bars
"""
sa = (tenkansen(data) + kijunsen(data)) / 2
# shift forward
shift_by = np.repeat(np.nan, 26)
sa = np.append(shift_by, sa)
return sa
def senkou_b(data, period=52):
"""
Senkou B (Leading Span B)
Formula:
(H + L) / 2 :: default period=52 :: shifted forward 26 bars
"""
sb = conversion_base_line_helper(data, period)
shift_by = np.repeat(np.nan, 26)
sb = np.append(shift_by, sb)
return sb
| 21.807692 | 88 | 0.63786 |
| 5c5c4380f1679f7ad9fa9b96e9ea15042f3033bb | 10,416 | py | Python | detectron2/export/api.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | ["Apache-2.0"] | null | null | null | detectron2/export/api.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | ["Apache-2.0"] | null | null | null | detectron2/export/api.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | ["Apache-2.0"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import copy
import logging
import os
import torch
from caffe2.proto import caffe2_pb2
from torch import nn
from detectron2.config import CfgNode as CN
from .caffe2_export import export_caffe2_detection_model
from .caffe2_export import export_onnx_model as export_onnx_model_impl
from .caffe2_export import run_and_save_graph
from .caffe2_inference import ProtobufDetectionModel
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
__all__ = [
"add_export_config",
"export_caffe2_model",
"Caffe2Model",
"export_onnx_model",
"Caffe2Tracer",
]
def add_export_config(cfg):
"""
Args:
cfg (CfgNode): a detectron2 config
Returns:
CfgNode: an updated config with new options that will be used
by :class:`Caffe2Tracer`.
"""
is_frozen = cfg.is_frozen()
cfg.defrost()
cfg.EXPORT_CAFFE2 = CN()
cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
if is_frozen:
cfg.freeze()
return cfg
class Caffe2Tracer:
"""
Make a detectron2 model traceable with caffe2 style.
An original detectron2 model may not be traceable, or
cannot be deployed directly after being traced, due to some reasons:
1. control flow in some ops
2. custom ops
3. complicated pre/post processing
This class provides a traceable version of a detectron2 model by:
1. Rewrite parts of the model using ops in caffe2. Note that some ops do
not have GPU implementation.
2. Define the inputs "after pre-processing" as inputs to the model
3. Remove post-processing and produce raw layer outputs
More specifically about inputs: all builtin models take two input tensors.
1. NCHW float "data" which is an image (usually in [0, 255])
2. Nx3 float "im_info", each row of which is (height, width, 1.0)
After making a traceable model, the class provide methods to export such a
model to different deployment formats.
The class currently only supports models using builtin meta architectures.
"""
def __init__(self, cfg, model, inputs):
"""
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`. Weights have to be already
loaded to this model.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model. Random input with no detected objects
will not work if the model has data-dependent control flow (e.g., R-CNN).
"""
assert isinstance(cfg, CN), cfg
assert isinstance(model, torch.nn.Module), type(model)
if "EXPORT_CAFFE2" not in cfg:
cfg = add_export_config(cfg) # will just use the defaults
self.cfg = cfg
self.model = model
self.inputs = inputs
def _get_traceable(self):
# TODO how to make it extensible to support custom models
C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[self.cfg.MODEL.META_ARCHITECTURE]
traceable_model = C2MetaArch(self.cfg, copy.deepcopy(self.model))
traceable_inputs = traceable_model.get_caffe2_inputs(self.inputs)
return traceable_model, traceable_inputs
def export_caffe2(self):
"""
Export the model to Caffe2's protobuf format.
The returned object can be saved with ``.save_protobuf()`` method.
The result can be loaded and executed using Caffe2 runtime.
Returns:
Caffe2Model
"""
model, inputs = self._get_traceable()
predict_net, init_net = export_caffe2_detection_model(model, inputs)
return Caffe2Model(predict_net, init_net)
def export_onnx(self):
"""
Export the model to ONNX format.
Note that the exported model contains custom ops only available in caffe2, therefore it
cannot be directly executed by other runtimes. Post-processing or transformation passes
may be applied on the model to accommodate different runtimes.
Returns:
onnx.ModelProto: an onnx model.
"""
model, inputs = self._get_traceable()
return export_onnx_model_impl(model, (inputs,))
def export_torchscript(self):
"""
Export the model to a ``torch.jit.TracedModule`` by tracing.
The returned object can be saved to a file by ``.save()``.
Returns:
torch.jit.TracedModule: a torch TracedModule
"""
model, inputs = self._get_traceable()
logger = logging.getLogger(__name__)
logger.info("Tracing the model with torch.jit.trace ...")
with torch.no_grad():
return torch.jit.trace(model, (inputs,), optimize=True)
def export_caffe2_model(cfg, model, inputs):
"""
Export a detectron2 model to caffe2 format.
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`.
It will be modified by this function.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model.
Returns:
Caffe2Model
"""
return Caffe2Tracer(cfg, model, inputs).export_caffe2()
def export_onnx_model(cfg, model, inputs):
"""
Export a detectron2 model to ONNX format.
Note that the exported model contains custom ops only available in caffe2, therefore it
cannot be directly executed by other runtimes. Post-processing or transformation passes
may be applied on the model to accommodate different runtimes.
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`.
It will be modified by this function.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model.
Returns:
onnx.ModelProto: an onnx model.
"""
return Caffe2Tracer(cfg, model, inputs).export_onnx()
class Caffe2Model(nn.Module):
"""
A wrapper around the traced model in caffe2's pb format.
"""
def __init__(self, predict_net, init_net):
super().__init__()
self.eval() # always in eval mode
self._predict_net = predict_net
self._init_net = init_net
self._predictor = None
@property
def predict_net(self):
"""
Returns:
core.Net: the underlying caffe2 predict net
"""
return self._predict_net
@property
def init_net(self):
"""
Returns:
core.Net: the underlying caffe2 init net
"""
return self._init_net
__init__.__HIDE_SPHINX_DOC__ = True
def save_protobuf(self, output_dir):
"""
Save the model as caffe2's protobuf format.
Args:
output_dir (str): the output directory to save protobuf files.
"""
logger = logging.getLogger(__name__)
logger.info("Saving model to {} ...".format(output_dir))
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model.pb"), "wb") as f:
f.write(self._predict_net.SerializeToString())
with open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
f.write(str(self._predict_net))
with open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
f.write(self._init_net.SerializeToString())
def save_graph(self, output_file, inputs=None):
"""
Save the graph as SVG format.
Args:
output_file (str): a SVG file
inputs: optional inputs given to the model.
If given, the inputs will be used to run the graph to record
shape of every tensor. The shape information will be
saved together with the graph.
"""
if inputs is None:
save_graph(self._predict_net, output_file, op_only=False)
else:
size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
inputs = [x.cpu().numpy() for x in inputs]
run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)
@staticmethod
def load_protobuf(dir):
"""
Args:
dir (str): a directory used to save Caffe2Model with
:meth:`save_protobuf`.
The files "model.pb" and "model_init.pb" are needed.
Returns:
Caffe2Model: the caffe2 model loaded from this directory.
"""
predict_net = caffe2_pb2.NetDef()
with open(os.path.join(dir, "model.pb"), "rb") as f:
predict_net.ParseFromString(f.read())
init_net = caffe2_pb2.NetDef()
with open(os.path.join(dir, "model_init.pb"), "rb") as f:
init_net.ParseFromString(f.read())
return Caffe2Model(predict_net, init_net)
def __call__(self, inputs):
"""
An interface that wraps around a caffe2 model and mimics detectron2's models'
input & output format. This is used to compare the outputs of caffe2 model
with its original torch model.
Due to the extra conversion between torch/caffe2,
this method is not meant for benchmark.
"""
if self._predictor is None:
self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
return self._predictor(inputs)
| 36.676056 | 99 | 0.634697 |
| 466e32946f419b8db1c9881776a13054a29dca37 | 74 | py | Python | src/functions/array/__init__.py | pfnet-research/label-efficient-brain-tumor-segmentation | aad80ed7acb510a3147bb11c3910d2e17fb355d1 | ["MIT"] | 21 | 2020-09-23T11:05:14.000Z | 2021-12-09T13:32:59.000Z | src/functions/array/__init__.py | shiontao/label-efficient-brain-tumor-segmentation | aad80ed7acb510a3147bb11c3910d2e17fb355d1 | ["MIT"] | null | null | null | src/functions/array/__init__.py | shiontao/label-efficient-brain-tumor-segmentation | aad80ed7acb510a3147bb11c3910d2e17fb355d1 | ["MIT"] | 5 | 2020-10-29T05:57:15.000Z | 2021-07-16T11:30:02.000Z |
from src.functions.array.resize_images_3d import resize_images_3d # NOQA
| 37 | 73 | 0.851351 |
| 73be96430087f0e7e27fd24c23625484bf559e01 | 1,277 | py | Python | main.py | apotl/ayydio | ba1aa0b95d6118e763c6c383181e07f9e0575c63 | ["BSD-2-Clause"] | null | null | null | main.py | apotl/ayydio | ba1aa0b95d6118e763c6c383181e07f9e0575c63 | ["BSD-2-Clause"] | null | null | null | main.py | apotl/ayydio | ba1aa0b95d6118e763c6c383181e07f9e0575c63 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/python3
from mpd import MPDClient
from lib.Song import Song
print('ayydioctrl micro')
c = MPDClient()
c.timeout = 10
c.idletimeout = None
c.connect("localhost",6600)
c.update()
def lsdb():
#for each file/dir
for f in c.lsinfo():
try:
print('\033[31m'+ f['directory'])
for ff in c.lsinfo(f['directory']):
try:
print('\033[32m' + ff['file'])
except KeyError:
pass
except KeyError:
print('\033[32m' + f['file'])
print('\033[30m')
def makeChoice():
print('\033[34m')
print('(1) list all available files in db')
print('(2) queue a song(s)')
print('(3) play')
print('(4) next song')
print('(5) show current playlist')
print('(6) search')
print('(q) quit')
choice = input('option: ')
return choice
def queueUri(uri):
try:
c.add(uri)
except:
pass
def searchdb(query):
print()
for result in c.search('file',str(query)):
print(result['file'])
while True:
x = makeChoice()
if x == '1':
lsdb()
elif x == '2':
queueUri(input('Enter uri: '))
elif x == '3':
c.play(0)
elif x == '4':
c.next()
elif x == '5':
print('Current playlist:')
for song in c.playlist():
print(song)
elif x == '6':
query = input('Search string: ')
searchdb(query)
elif x == 'q':
break
c.close()
c.disconnect()
| 18.242857 | 44 | 0.605325 |
| 79a60fcb1dde792cb8826ede191818b2a8c19dcd | 2,976 | py | Python | lambda-python-git-secrets/demo4.py | mbacchi/secret-leak-prevention-demo | 770dec9235b5dfbfb18465743aa6e1687c707eb0 | ["BSD-2-Clause"] | 3 | 2018-03-05T21:02:26.000Z | 2022-03-25T18:08:01.000Z | lambda-python-git-secrets/demo4.py | mbacchi/secret-leak-prevention-demo | 770dec9235b5dfbfb18465743aa6e1687c707eb0 | ["BSD-2-Clause"] | null | null | null | lambda-python-git-secrets/demo4.py | mbacchi/secret-leak-prevention-demo | 770dec9235b5dfbfb18465743aa6e1687c707eb0 | ["BSD-2-Clause"] | 1 | 2018-03-05T23:05:51.000Z | 2018-03-05T23:05:51.000Z |
import os
import random
import re
import shutil
from dulwich import porcelain
from gitsecrets import GitSecrets
from string import ascii_uppercase, ascii_lowercase, digits
from tempfile import mkdtemp
# For this demo we use Dulwich instead of Git to clone a repository from GitHub.
#
# This is because in an AWS Lambda function we don't want to rely on arbitrary
# binaries such as Git being installed, we would rather use pure Python tools,
# such as Dulwich.
#
# Dulwich is available via pip, or at https://github.com/jelmer/dulwich
#
# We also use python-git-secrets which is available via pip, or at
# https://github.com/mbacchi/python-git-secrets.git
#
class Devnull(object):
"""
This mimics a stream to write to for dulwich porcelain status output. Since we
don't want to see the status this is a hack to suppress anything printing on stdout.
Borrowed from:
https://stackoverflow.com/questions/2929899/cross-platform-dev-null-in-python
"""
def write(self, *_): pass
def newfile(path, content):
with open(path, "w") as f:
f.write(content + '\n')
def python_git_secrets(event, context):
# Set the GitHub repository to clone and the directory to clone into for
# demonstration purposes
repo = 'https://github.com/mbacchi/python-git-secrets.git'
target = mkdtemp()
# If the target path exists remove it so we don't error out when cloning
# later
if os.path.exists(target):
print("Removing directory \'{}\'...".format(target))
shutil.rmtree(target)
print("Cloning repository \'{}\' into \'{}\'...\n".format(repo, target))
# Perform the clone operation
nullstream = open(os.devnull, "w")
newrepo = porcelain.clone(repo, target, errstream=Devnull())
# Create a random uppercase string that looks like an AWS ACCESS_KEY_ID value
print("Creating file in directory \'{}\' with content that looks like an AWS ACCESS_KEY_ID\n".format(target))
patterns = [''.join("A3T" + random.choice(ascii_uppercase)), 'AKIA',
'AGPA', 'AIDA', 'AROA', 'AIPA', 'ANPA', 'ANVA', 'ASIA']
prefix = random.choice(patterns)
generated = ''.join(random.choice(ascii_uppercase) for _ in range(16))
key = prefix + generated
newfile(target + '/aws-credentials', "aws_access_key_id=" + key)
# Show the users the string we placed in the file above
print("Contents of file: \'{}\':".format(target + '/aws-credentials'))
with open(target + '/aws-credentials', "r") as f:
blah = f.read().rstrip()
print("\'{}\'\n".format(blah))
# Instantiate the GitSecrets class
gs = GitSecrets()
# Scan the repository which should find a string because we created a file
# with a sample AWS_ACCESS_KEY above
print("Now scanning directory \'{}\' for secrets".format(target))
if gs.scan_recursively(target):
print("Found verboten string in path \'{}\'".format(target))
# Remove temp dir
shutil.rmtree(target)
| 35.855422 | 113 | 0.686828 |
| 951acd0b50a0fc594bef08fb82e7d05f1c53e480 | 31,473 | py | Python | shapely/geometry/base.py | IncoCura/Shapely | 5d18af283e485a427d121e05fbf8e9968db4a569 | ["BSD-3-Clause"] | null | null | null | shapely/geometry/base.py | IncoCura/Shapely | 5d18af283e485a427d121e05fbf8e9968db4a569 | ["BSD-3-Clause"] | null | null | null | shapely/geometry/base.py | IncoCura/Shapely | 5d18af283e485a427d121e05fbf8e9968db4a569 | ["BSD-3-Clause"] | 1 | 2019-12-27T16:56:38.000Z | 2019-12-27T16:56:38.000Z |
"""Base geometry class and utilities
Note: a third, z, coordinate value may be used when constructing
geometry objects, but has no effect on geometric analysis. All
operations are performed in the x-y plane. Thus, geometries with
different z values may intersect or be equal.
"""
from binascii import a2b_hex
from ctypes import pointer, c_size_t, c_char_p, c_void_p
from itertools import islice
import math
import sys
from warnings import warn
from functools import wraps
from shapely.affinity import affine_transform
from shapely.coords import CoordinateSequence
from shapely.errors import WKBReadingError, WKTReadingError
from shapely.geos import WKBWriter, WKTWriter
from shapely.geos import lgeos
from shapely.impl import DefaultImplementation, delegated
if sys.version_info[0] < 3:
range = xrange
integer_types = (int, long)
else:
integer_types = (int,)
try:
import numpy as np
integer_types = integer_types + (np.integer,)
except ImportError:
pass
GEOMETRY_TYPES = [
'Point',
'LineString',
'LinearRing',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon',
'GeometryCollection',
]
def dump_coords(geom):
"""Dump coordinates of a geometry in the same order as data packing"""
if not isinstance(geom, BaseGeometry):
raise ValueError('Must be instance of a geometry class; found ' +
geom.__class__.__name__)
elif geom.type in ('Point', 'LineString', 'LinearRing'):
return geom.coords[:]
elif geom.type == 'Polygon':
return geom.exterior.coords[:] + [i.coords[:] for i in geom.interiors]
elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
# Recursive call
return [dump_coords(part) for part in geom]
else:
raise ValueError('Unhandled geometry type: ' + repr(geom.type))
def geometry_type_name(g):
if g is None:
raise ValueError("Null geometry has no type")
return GEOMETRY_TYPES[lgeos.GEOSGeomTypeId(g)]
def geom_factory(g, parent=None):
# Abstract geometry factory for use with topological methods below
if not g:
raise ValueError("No Shapely geometry can be created from null value")
ob = BaseGeometry()
geom_type = geometry_type_name(g)
# TODO: check cost of dynamic import by profiling
mod = __import__(
'shapely.geometry',
globals(),
locals(),
[geom_type],
)
ob.__class__ = getattr(mod, geom_type)
ob._geom = g
ob.__p__ = parent
if lgeos.methods['has_z'](g):
ob._ndim = 3
else:
ob._ndim = 2
ob._is_empty = False
return ob
def geom_from_wkt(data):
warn("`geom_from_wkt` is deprecated. Use `geos.wkt_reader.read(data)`.",
DeprecationWarning)
if sys.version_info[0] >= 3:
data = data.encode('ascii')
geom = lgeos.GEOSGeomFromWKT(c_char_p(data))
if not geom:
raise WKTReadingError(
"Could not create geometry because of errors while reading input.")
return geom_factory(geom)
def geom_to_wkt(ob):
warn("`geom_to_wkt` is deprecated. Use `geos.wkt_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
return lgeos.GEOSGeomToWKT(ob._geom)
def deserialize_wkb(data):
geom = lgeos.GEOSGeomFromWKB_buf(c_char_p(data), c_size_t(len(data)))
if not geom:
raise WKBReadingError(
"Could not create geometry because of errors while reading input.")
return geom
def geom_from_wkb(data):
warn("`geom_from_wkb` is deprecated. Use `geos.wkb_reader.read(data)`.",
DeprecationWarning)
return geom_factory(deserialize_wkb(data))
def geom_to_wkb(ob):
warn("`geom_to_wkb` is deprecated. Use `geos.wkb_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
size = c_size_t()
return lgeos.GEOSGeomToWKB_buf(c_void_p(ob._geom), pointer(size))
def geos_geom_from_py(ob, create_func=None):
"""Helper function for geos_*_from_py functions in each geom type.
If a create_func is specified the coordinate sequence is cloned and a new
geometry is created with it, otherwise the geometry is cloned directly.
This behaviour is useful for converting between LineString and LinearRing
objects.
"""
if create_func is None:
geom = lgeos.GEOSGeom_clone(ob._geom)
else:
cs = lgeos.GEOSGeom_getCoordSeq(ob._geom)
cs = lgeos.GEOSCoordSeq_clone(cs)
geom = create_func(cs)
N = ob._ndim
return geom, N
def exceptNull(func):
"""Decorator which helps avoid GEOS operations on null pointers."""
@wraps(func)
def wrapper(*args, **kwargs):
if not args[0]._geom or args[0].is_empty:
raise ValueError("Null/empty geometry supports no operations")
return func(*args, **kwargs)
return wrapper
class CAP_STYLE(object):
round = 1
flat = 2
square = 3
class JOIN_STYLE(object):
round = 1
mitre = 2
bevel = 3
EMPTY = deserialize_wkb(a2b_hex(b'010700000000000000'))
class BaseGeometry(object):
"""
Provides GEOS spatial predicates and topological operations.
"""
# Attributes
# ----------
# __geom__ : c_void_p
# Cached ctypes pointer to GEOS geometry. Not to be accessed.
# _geom : c_void_p
# Property by which the GEOS geometry is accessed.
# __p__ : object
# Parent (Shapely) geometry
# _ctypes_data : object
# Cached ctypes data buffer
# _ndim : int
# Number of dimensions (2 or 3, generally)
# _crs : object
# Coordinate reference system. Available for Shapely extensions, but
# not implemented here.
# _other_owned : bool
# True if this object's GEOS geometry is owned by another as in the
# case of a multipart geometry member.
__geom__ = EMPTY
__p__ = None
_ctypes_data = None
_ndim = None
_crs = None
_other_owned = False
_is_empty = True
# Backend config
impl = DefaultImplementation
# a reference to the so/dll proxy to preserve access during clean up
_lgeos = lgeos
def empty(self, val=EMPTY):
# TODO: defer cleanup to the implementation. We shouldn't be
# explicitly calling a lgeos method here.
if not self._is_empty and not self._other_owned and self.__geom__:
try:
self._lgeos.GEOSGeom_destroy(self.__geom__)
except (AttributeError, TypeError):
pass # _lgeos might be empty on shutdown
self._is_empty = True
self.__geom__ = val
def __del__(self):
self.empty(val=None)
self.__p__ = None
def __str__(self):
return self.wkt
# To support pickling
def __reduce__(self):
return (self.__class__, (), self.wkb)
def __setstate__(self, state):
self.empty()
self.__geom__ = deserialize_wkb(state)
self._is_empty = False
if lgeos.methods['has_z'](self.__geom__):
self._ndim = 3
else:
self._ndim = 2
@property
def _geom(self):
return self.__geom__
@_geom.setter
def _geom(self, val):
self.empty()
self._is_empty = val in [EMPTY, None]
self.__geom__ = val
# Operators
# ---------
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def __eq__(self, other):
return (
type(other) == type(self) and
tuple(self.coords) == tuple(other.coords)
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
# Array and ctypes interfaces
# ---------------------------
@property
def ctypes(self):
"""Return ctypes buffer"""
raise NotImplementedError
@property
def array_interface_base(self):
if sys.byteorder == 'little':
typestr = '<f8'
elif sys.byteorder == 'big':
typestr = '>f8'
else:
raise ValueError(
"Unsupported byteorder: neither little nor big-endian")
return {
'version': 3,
'typestr': typestr,
'data': self.ctypes,
}
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError
# Coordinate access
# -----------------
def _get_coords(self):
"""Access to geometry's coordinates (CoordinateSequence)"""
if self.is_empty:
return []
return CoordinateSequence(self)
def _set_coords(self, ob):
raise NotImplementedError(
"set_coords must be provided by derived classes")
coords = property(_get_coords, _set_coords)
@property
def xy(self):
"""Separate arrays of X and Y coordinate values"""
raise NotImplementedError
# Python feature protocol
@property
def __geo_interface__(self):
"""Dictionary representation of the geometry"""
raise NotImplementedError
# Type of geometry and its representations
# ----------------------------------------
def geometryType(self):
return geometry_type_name(self._geom)
@property
def type(self):
return self.geometryType()
def to_wkb(self):
warn("`to_wkb` is deprecated. Use the `wkb` property.",
DeprecationWarning)
return geom_to_wkb(self)
def to_wkt(self):
warn("`to_wkt` is deprecated. Use the `wkt` property.",
DeprecationWarning)
return geom_to_wkt(self)
@property
def wkt(self):
"""WKT representation of the geometry"""
return WKTWriter(lgeos).write(self)
@property
def wkb(self):
"""WKB representation of the geometry"""
return WKBWriter(lgeos).write(self)
@property
def wkb_hex(self):
"""WKB hex representation of the geometry"""
return WKBWriter(lgeos).write_hex(self)
def svg(self, scale_factor=1., **kwargs):
"""Raises NotImplementedError"""
raise NotImplementedError
def _repr_svg_(self):
"""SVG representation for iPython notebook"""
svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" '
if self.is_empty:
return svg_top + '/>'
else:
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = self.bounds
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = self.buffer(1).bounds
else:
# Expand bounds by a fraction of the data ranges
expand = 0.04 # or 4%, same as R plots
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
try:
scale_factor = max([dx, dy]) / max([width, height])
except ZeroDivisionError:
scale_factor = 1.
view_box = "{} {} {} {}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{})".format(ymax + ymin)
return svg_top + (
'width="{1}" height="{2}" viewBox="{0}" '
'preserveAspectRatio="xMinYMin meet">'
'<g transform="{3}">{4}</g></svg>'
).format(view_box, width, height, transform,
self.svg(scale_factor))
@property
def geom_type(self):
"""Name of the geometry's type, such as 'Point'"""
return self.geometryType()
# Real-valued properties and methods
# ----------------------------------
@property
def area(self):
"""Unitless area of the geometry (float)"""
return self.impl['area'](self)
def distance(self, other):
"""Unitless distance to other geometry (float)"""
return self.impl['distance'](self, other)
def hausdorff_distance(self, other):
"""Unitless hausdorff distance to other geometry (float)"""
return self.impl['hausdorff_distance'](self, other)
@property
def length(self):
"""Unitless length of the geometry (float)"""
return self.impl['length'](self)
# Topological properties
# ----------------------
@property
def boundary(self):
"""Returns a lower dimension geometry that bounds the object
The boundary of a polygon is a line, the boundary of a line is a
collection of points. The boundary of a point is an empty (null)
collection.
"""
return geom_factory(self.impl['boundary'](self))
@property
def bounds(self):
"""Returns minimum bounding region (minx, miny, maxx, maxy)"""
if self.is_empty:
return ()
else:
return self.impl['bounds'](self)
@property
def centroid(self):
"""Returns the geometric center of the object"""
return geom_factory(self.impl['centroid'](self))
@delegated
def representative_point(self):
"""Returns a point guaranteed to be within the object, cheaply."""
return geom_factory(self.impl['representative_point'](self))
@property
def convex_hull(self):
"""Imagine an elastic band stretched around the geometry: that's a
convex hull, more or less
The convex hull of a three member multipoint, for example, is a
triangular polygon.
"""
return geom_factory(self.impl['convex_hull'](self))
@property
def envelope(self):
"""A figure that envelopes the geometry"""
return geom_factory(self.impl['envelope'](self))
@property
def minimum_rotated_rectangle(self):
"""Returns the general minimum bounding rectangle of
the geometry. Can possibly be rotated. If the convex hull
of the object is a degenerate (line or point) this same degenerate
is returned.
"""
# first compute the convex hull
hull = self.convex_hull
try:
coords = hull.exterior.coords
except AttributeError: # may be a Point or a LineString
return hull
# generate the edge vectors between the convex hull's coords
edges = ((pt2[0] - pt1[0], pt2[1] - pt1[1]) for pt1, pt2 in zip(
coords, islice(coords, 1, None)))
def _transformed_rects():
for dx, dy in edges:
# compute the normalized direction vector of the edge
# vector.
length = math.sqrt(dx ** 2 + dy ** 2)
ux, uy = dx / length, dy / length
# compute the normalized perpendicular vector
vx, vy = -uy, ux
# transform hull from the original coordinate system to
# the coordinate system defined by the edge and compute
# the axes-parallel bounding rectangle.
transf_rect = affine_transform(
hull, (ux, uy, vx, vy, 0, 0)).envelope
# yield the transformed rectangle and a matrix to
# transform it back to the original coordinate system.
yield (transf_rect, (ux, vx, uy, vy, 0, 0))
# check for the minimum area rectangle and return it
transf_rect, inv_matrix = min(
_transformed_rects(), key=lambda r: r[0].area)
return affine_transform(transf_rect, inv_matrix)
def buffer(self, distance, resolution=16, quadsegs=None,
cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round,
mitre_limit=5.0):
"""Returns a geometry with an envelope at a distance from the object's
envelope
A negative distance has a "shrink" effect. A zero distance may be used
to "tidy" a polygon. The resolution of the buffer around each vertex of
the object increases by increasing the resolution keyword parameter
or second positional parameter. Note: the use of a `quadsegs` parameter
is deprecated and will be gone from the next major release.
The styles of caps are: CAP_STYLE.round (1), CAP_STYLE.flat (2), and
CAP_STYLE.square (3).
The styles of joins between offset segments are: JOIN_STYLE.round (1),
JOIN_STYLE.mitre (2), and JOIN_STYLE.bevel (3).
The mitre limit ratio is used for very sharp corners. The mitre ratio
is the ratio of the distance from the corner to the end of the mitred
offset corner. When two line segments meet at a sharp angle, a miter
join will extend the original geometry. To prevent unreasonable
geometry, the mitre limit allows controlling the maximum length of the
join corner. Corners with a ratio which exceed the limit will be
beveled.
Example:
>>> from shapely.wkt import loads
>>> g = loads('POINT (0.0 0.0)')
>>> g.buffer(1.0).area # 16-gon approx of a unit radius circle
3.1365484905459389
>>> g.buffer(1.0, 128).area # 128-gon approximation
3.1415138011443009
>>> g.buffer(1.0, 3).area # triangle approximation
3.0
>>> list(g.buffer(1.0, cap_style='square').exterior.coords)
[(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0)]
>>> g.buffer(1.0, cap_style='square').area
4.0
"""
if quadsegs is not None:
warn(
"The `quadsegs` argument is deprecated. Use `resolution`.",
DeprecationWarning)
res = quadsegs
else:
res = resolution
if mitre_limit == 0.0:
raise ValueError(
'Cannot compute offset from zero-length line segment')
if cap_style == CAP_STYLE.round and join_style == JOIN_STYLE.round:
return geom_factory(self.impl['buffer'](self, distance, res))
if 'buffer_with_style' not in self.impl:
raise NotImplementedError("Styled buffering not available for "
"GEOS versions < 3.2.")
return geom_factory(self.impl['buffer_with_style'](self, distance, res,
cap_style,
join_style,
mitre_limit))
@delegated
def simplify(self, tolerance, preserve_topology=True):
"""Returns a simplified geometry produced by the Douglas-Peucker
algorithm
Coordinates of the simplified geometry will be no more than the
tolerance distance from the original. Unless the topology preserving
option is used, the algorithm may produce self-intersecting or
otherwise invalid geometries.
"""
if preserve_topology:
op = self.impl['topology_preserve_simplify']
else:
op = self.impl['simplify']
return geom_factory(op(self, tolerance))
# Binary operations
# -----------------
def difference(self, other):
"""Returns the difference of the geometries"""
return geom_factory(self.impl['difference'](self, other))
def intersection(self, other):
"""Returns the intersection of the geometries"""
return geom_factory(self.impl['intersection'](self, other))
def symmetric_difference(self, other):
"""Returns the symmetric difference of the geometries
(Shapely geometry)"""
return geom_factory(self.impl['symmetric_difference'](self, other))
def union(self, other):
"""Returns the union of the geometries (Shapely geometry)"""
return geom_factory(self.impl['union'](self, other))
# Unary predicates
# ----------------
@property
def has_z(self):
"""True if the geometry's coordinate sequence(s) have z values (are
3-dimensional)"""
return bool(self.impl['has_z'](self))
@property
def is_empty(self):
"""True if the set of points in this geometry is empty, else False"""
return (self._geom is None) or bool(self.impl['is_empty'](self))
@property
def is_ring(self):
"""True if the geometry is a closed ring, else False"""
return bool(self.impl['is_ring'](self))
@property
def is_closed(self):
"""True if the geometry is closed, else False
Applicable only to 1-D geometries."""
if self.geom_type == 'LinearRing':
return True
elif self.geom_type == 'LineString':
if 'is_closed' in self.impl:
return bool(self.impl['is_closed'](self))
else:
return self.coords[0] == self.coords[-1]
else:
return False
@property
def is_simple(self):
"""True if the geometry is simple, meaning that any self-intersections
are only at boundary points, else False"""
return bool(self.impl['is_simple'](self))
@property
def is_valid(self):
"""True if the geometry is valid (definition depends on sub-class),
else False"""
return bool(self.impl['is_valid'](self))
# Binary predicates
# -----------------
def relate(self, other):
"""Returns the DE-9IM intersection matrix for the two geometries
(string)"""
return self.impl['relate'](self, other)
def covers(self, other):
"""Returns True if the geometry covers the other, else False"""
return bool(self.impl['covers'](self, other))
def contains(self, other):
"""Returns True if the geometry contains the other, else False"""
return bool(self.impl['contains'](self, other))
def crosses(self, other):
"""Returns True if the geometries cross, else False"""
return bool(self.impl['crosses'](self, other))
def disjoint(self, other):
"""Returns True if geometries are disjoint, else False"""
return bool(self.impl['disjoint'](self, other))
def equals(self, other):
"""Returns True if geometries are equal, else False
Refers to point-set equality (or topological equality), and is equivalent to
(self.within(other) & self.contains(other))
"""
return bool(self.impl['equals'](self, other))
def intersects(self, other):
"""Returns True if geometries intersect, else False"""
return bool(self.impl['intersects'](self, other))
def overlaps(self, other):
"""Returns True if geometries overlap, else False"""
return bool(self.impl['overlaps'](self, other))
def touches(self, other):
"""Returns True if geometries touch, else False"""
return bool(self.impl['touches'](self, other))
def within(self, other):
"""Returns True if geometry is within the other, else False"""
return bool(self.impl['within'](self, other))
def equals_exact(self, other, tolerance):
"""Returns True if geometries are equal to within a specified
tolerance
Refers to coordinate equality, which requires coordinates to be equal
and in the same order for all components of a geometry
"""
return bool(self.impl['equals_exact'](self, other, tolerance))
def almost_equals(self, other, decimal=6):
"""Returns True if geometries are equal at all coordinates to a
specified decimal place
Refers to approximate coordinate equality, which requires coordinates be
approximately equal and in the same order for all components of a geometry.
"""
return self.equals_exact(other, 0.5 * 10**(-decimal))
def relate_pattern(self, other, pattern):
"""Returns True if the DE-9IM string code for the relationship between
the geometries satisfies the pattern, else False"""
pattern = c_char_p(pattern.encode('ascii'))
return bool(self.impl['relate_pattern'](self, other, pattern))
# Linear referencing
# ------------------
@delegated
def project(self, other, normalized=False):
"""Returns the distance along this geometry to a point nearest the
specified point
If the normalized arg is True, return the distance normalized to the
length of the linear geometry.
"""
if normalized:
op = self.impl['project_normalized']
else:
op = self.impl['project']
return op(self, other)
@delegated
def interpolate(self, distance, normalized=False):
"""Return a point at the specified distance along a linear geometry
Negative length values are taken as measured in the reverse
direction from the end of the geometry. Out-of-range index
values are handled by clamping them to the valid range of values.
If the normalized arg is True, the distance will be interpreted as a
fraction of the geometry's length.
"""
if normalized:
op = self.impl['interpolate_normalized']
else:
op = self.impl['interpolate']
return geom_factory(op(self, distance))
class BaseMultipartGeometry(BaseGeometry):
def shape_factory(self, *args):
# Factory for part instances, usually a geometry class
raise NotImplementedError("To be implemented by derived classes")
@property
def ctypes(self):
raise NotImplementedError(
"Multi-part geometries have no ctypes representations")
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError("Multi-part geometries do not themselves "
"provide the array interface")
def _get_coords(self):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
def _set_coords(self, ob):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
@property
def coords(self):
raise NotImplementedError(
"Multi-part geometries do not provide a coordinate sequence")
@property
def geoms(self):
if self.is_empty:
return []
return GeometrySequence(self, self.shape_factory)
def __iter__(self):
if not self.is_empty:
return iter(self.geoms)
else:
return iter([])
def __len__(self):
if not self.is_empty:
return len(self.geoms)
else:
return 0
def __getitem__(self, index):
if not self.is_empty:
return self.geoms[index]
else:
return ()[index]
def __eq__(self, other):
return (
type(other) == type(self) and
len(self) == len(other) and
all(x == y for x, y in zip(self, other))
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
def svg(self, scale_factor=1., color=None):
"""Returns a group of SVG elements for the multipart geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
color : str, optional
Hex string for stroke or fill color. Default is to use "#66cc99"
if geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if color is None:
color = "#66cc99" if self.is_valid else "#ff3333"
return '<g>' + \
''.join(p.svg(scale_factor, color) for p in self) + \
'</g>'
class GeometrySequence(object):
"""
Iterative access to members of a homogeneous multipart geometry.
"""
# Attributes
# ----------
# _factory : callable
# Returns instances of Shapely geometries
# _geom : c_void_p
# Ctypes pointer to the parent's GEOS geometry
# _ndim : int
# Number of dimensions (2 or 3, generally)
# __p__ : object
# Parent (Shapely) geometry
shape_factory = None
_geom = None
__p__ = None
_ndim = None
def __init__(self, parent, type):
self.shape_factory = type
self.__p__ = parent
def _update(self):
self._geom = self.__p__._geom
self._ndim = self.__p__._ndim
def _get_geom_item(self, i):
g = self.shape_factory()
g._other_owned = True
g._geom = lgeos.GEOSGetGeometryN(self._geom, i)
g._ndim = self._ndim
g.__p__ = self
return g
def __iter__(self):
self._update()
for i in range(self.__len__()):
yield self._get_geom_item(i)
def __len__(self):
self._update()
return lgeos.GEOSGetNumGeometries(self._geom)
def __getitem__(self, key):
self._update()
m = self.__len__()
if isinstance(key, integer_types):
if key + m < 0 or key >= m:
raise IndexError("index out of range")
if key < 0:
i = m + key
else:
i = key
return self._get_geom_item(i)
elif isinstance(key, slice):
if type(self) == HeterogeneousGeometrySequence:
raise TypeError(
"Heterogenous geometry collections are not sliceable")
res = []
start, stop, stride = key.indices(m)
for i in range(start, stop, stride):
res.append(self._get_geom_item(i))
return type(self.__p__)(res or None)
else:
raise TypeError("key must be an index or slice")
@property
def _longest(self):
max = 0
for g in iter(self):
l = len(g.coords)
if l > max:
max = l
class HeterogeneousGeometrySequence(GeometrySequence):
"""
Iterative access to a heterogeneous sequence of geometries.
"""
def __init__(self, parent):
super(HeterogeneousGeometrySequence, self).__init__(parent, None)
def _get_geom_item(self, i):
sub = lgeos.GEOSGetGeometryN(self._geom, i)
g = geom_factory(sub, parent=self)
g._other_owned = True
return g
class EmptyGeometry(BaseGeometry):
def __init__(self):
"""Create an empty geometry."""
BaseGeometry.__init__(self)
def _test():
"""Test runner"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| 32.313142 | 84 | 0.602326 |
| b5ac105a1eecde7bec42a08a378ace8ccda31133 | 594 | py | Python | order/migrations/0021_auto_20211122_0834.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | ["MIT"] | null | null | null | order/migrations/0021_auto_20211122_0834.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | ["MIT"] | null | null | null | order/migrations/0021_auto_20211122_0834.py | gheyderov/E-commerce-website | 9a87e8e6658a69fb017bdc3b36d6dc5417e3124e | ["MIT"] | null | null | null |
# Generated by Django 3.2.6 on 2021-11-22 08:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('order', '0020_auto_20211122_0825'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='basket_items',
),
migrations.AddField(
model_name='order',
name='basket_items',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='order.basketitem'),
),
]
| 24.75 | 116 | 0.614478 |
| c6c381c7e793e64384ad2464c153a3208d682929 | 2,872 | py | Python | perfkitbenchmarker/data/edw/script_driver.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | ["Apache-2.0"] | 1 | 2018-08-28T19:33:21.000Z | 2018-08-28T19:33:21.000Z | perfkitbenchmarker/data/edw/script_driver.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | ["Apache-2.0"] | null | null | null | perfkitbenchmarker/data/edw/script_driver.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | ["Apache-2.0"] | null | null | null |
"""Driver for running a script against a EDW cluster.
Driver compiles the provider specific script execution command and returns the
time taken to execute the script in seconds or -1 if the script fails.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
from subprocess import call
import time
from absl import app
from absl import flags
import provider_specific_script_driver
__author__ = 'p3rf@google.com'
flags.DEFINE_string('script', None, 'SQL script which contains the query.')
flags.DEFINE_string('logfile_suffix', 'log', 'Suffix to use for the output and '
'error files.')
flags.DEFINE_multi_string('failing_scripts', [],
'List of failing scripts whose execution should be '
'skipped.')
FLAGS = flags.FLAGS
DRIVER_NAME = './script_runner.sh'
JOB_ID_KEY = 'INFO:googleapiclient.model:jobId:'
API_LOG_FILE = 'apilog.out'
def default_logfile_names(script, suffix):
"""Method to return the names for output and error log files."""
suffix = script.split('.')[0] if suffix is None else suffix
output_logfile = '{}_out.txt'.format(suffix)
error_logfile = '{}_err.txt'.format(suffix)
return output_logfile, error_logfile
def execute_script(script, logfile_suffix):
"""Method to execute a sql script on a EDW cluster.
Arguments:
script: SQL script which contains the query.
logfile_suffix: Suffix to use for the output and error files.
Returns:
Dictionary containing the name of the script and its execution time (-1 if
the script fails)
"""
response_status = 1 # assume failure by default
job_id = 'undefined_job'
if script not in FLAGS.failing_scripts:
output, error = default_logfile_names(script, logfile_suffix)
cmd = provider_specific_script_driver.generate_provider_specific_cmd_list(
script, DRIVER_NAME, output, error)
start_time = time.time()
response_status = call(cmd)
execution_time = -1 if (response_status != 0) else round((time.time() -
start_time), 2)
try:
with open(API_LOG_FILE) as fp:
line = fp.readline()
while line:
line_tokens = line.strip().split()
if len(line_tokens) > 1 and line_tokens[0] == JOB_ID_KEY:
job_id = line.strip().split()[1]
break
line = fp.readline()
except IOError:
pass
script_execution_details = {'execution_time': execution_time,
'job_id': job_id}
results = {script: script_execution_details}
return json.dumps(results)
def main(argv):
del argv
print(execute_script(FLAGS.script, FLAGS.logfile_suffix))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
app.run(main)
| 31.56044 | 80 | 0.68663 |
| 30784f7b10bc674d258bc31324cea97f9778595b | 407 | py | Python | ctre/__init__.py | stmobo/robotpy-ctre | 7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb | ["Apache-2.0"] | null | null | null | ctre/__init__.py | stmobo/robotpy-ctre | 7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb | ["Apache-2.0"] | null | null | null | ctre/__init__.py | stmobo/robotpy-ctre | 7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb | ["Apache-2.0"] | null | null | null |
from .wpi_talonsrx import WPI_TalonSRX
from .wpi_victorspx import WPI_VictorSPX
from .canifier import CANifier
from .pigeonimu import PigeonIMU
from ._impl import (
ControlMode,
FeedbackDevice,
RemoteFeedbackDevice,
NeutralMode,
)
from .trajectorypoint import TrajectoryPoint
try:
from .version import __version__
except ImportError: # pragma: nocover
__version__ = 'master'
| 20.35 | 44 | 0.769042 |
| e1462ea1619bc959f8fd5becf206a660ff1303b5 | 7,344 | py | Python | plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py | ansible-collections/dellemc_networking.sonic | ca9382bb5a3bd021d3f9766077e8e452f710a1ce | ["Apache-2.0"] | null | null | null | plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py | ansible-collections/dellemc_networking.sonic | ca9382bb5a3bd021d3f9766077e8e452f710a1ce | ["Apache-2.0"] | null | null | null | plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py | ansible-collections/dellemc_networking.sonic | ca9382bb5a3bd021d3f9766077e8e452f710a1ce | ["Apache-2.0"] | 2 | 2020-03-11T12:19:45.000Z | 2020-03-11T15:37:53.000Z |
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic l3_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
)
from ansible.module_utils.connection import ConnectionError
class L3_interfacesFacts(object):
""" The sonic l3_interfaces fact class
"""
loop_backs = ","
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = L3_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def get_l3_interfaces(self):
url = "data/openconfig-interfaces:interfaces/interface"
method = "GET"
request = [{"path": url, "method": method}]
try:
response = edit_config(self._module, to_request(self._module, request))
except ConnectionError as exc:
self._module.fail_json(msg=str(exc), code=exc.code)
l3_lists = []
if "openconfig-interfaces:interface" in response[0][1]:
l3_lists = response[0][1].get("openconfig-interfaces:interface", [])
l3_configs = []
for l3 in l3_lists:
l3_dict = dict()
l3_name = l3["name"]
if l3_name == "eth0":
continue
l3_dict['name'] = l3_name
ip = None
anycast_addr = list()
if l3.get('openconfig-vlan:routed-vlan'):
ip = l3['openconfig-vlan:routed-vlan']
if ip.get('openconfig-if-ip:ipv4', None) and ip['openconfig-if-ip:ipv4'].get('openconfig-interfaces-ext:sag-ipv4', None):
if ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4'].get('config', None):
if ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4']['config'].get('static-anycast-gateway', None):
anycast_addr = ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4']['config']['static-anycast-gateway']
else:
ip = l3.get('subinterfaces', {}).get('subinterface', [{}])[0]
l3_dict['ipv4'] = dict()
l3_ipv4 = list()
if anycast_addr:
l3_dict['ipv4']['anycast_addresses'] = anycast_addr
elif 'openconfig-if-ip:ipv4' in ip and 'addresses' in ip['openconfig-if-ip:ipv4'] and 'address' in ip['openconfig-if-ip:ipv4']['addresses']:
for ipv4 in ip['openconfig-if-ip:ipv4']['addresses']['address']:
if ipv4.get('config') and ipv4.get('config').get('ip'):
temp = dict()
temp['address'] = str(ipv4['config']['ip']) + '/' + str(ipv4['config']['prefix-length'])
temp['secondary'] = ipv4['config']['secondary']
l3_ipv4.append(temp)
if l3_ipv4:
l3_dict['ipv4']['addresses'] = l3_ipv4
l3_dict['ipv6'] = dict()
l3_ipv6 = list()
if 'openconfig-if-ip:ipv6' in ip:
if 'addresses' in ip['openconfig-if-ip:ipv6'] and 'address' in ip['openconfig-if-ip:ipv6']['addresses']:
for ipv6 in ip['openconfig-if-ip:ipv6']['addresses']['address']:
if ipv6.get('config') and ipv6.get('config').get('ip'):
temp = dict()
temp['address'] = str(ipv6['config']['ip']) + '/' + str(ipv6['config']['prefix-length'])
l3_ipv6.append(temp)
if l3_ipv6:
l3_dict['ipv6']['addresses'] = l3_ipv6
if 'config' in ip['openconfig-if-ip:ipv6'] and 'enabled' in ip['openconfig-if-ip:ipv6']['config']:
l3_dict['ipv6']['enabled'] = ip['openconfig-if-ip:ipv6']['config']['enabled']
l3_configs.append(l3_dict)
return l3_configs
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for l3_interfaces
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if connection: # just for linting purposes, remove
pass
if not data:
resources = self.get_l3_interfaces()
objs = []
for resource in resources:
if resource:
obj = self.render_config(self.generated_spec, resource)
obj = self.transform_config(obj)
if obj:
objs.append(obj)
ansible_facts['ansible_network_resources'].pop('l3_interfaces', None)
facts = {}
if objs:
params = utils.validate_config(self.argument_spec, {'config': objs})
facts['l3_interfaces'] = params['config']
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
return conf
def transform_config(self, conf):
exist_cfg = conf
trans_cfg = None
is_loop_back = False
name = exist_cfg['name']
if name.startswith('Loopback'):
is_loop_back = True
pos = name.find('|')
if pos > 0:
name = name[0:pos]
if not (is_loop_back and self.is_loop_back_already_esist(name)) and (name != "eth0"):
trans_cfg = dict()
trans_cfg['name'] = name
if is_loop_back:
self.update_loop_backs(name)
trans_cfg['ipv4'] = exist_cfg.get('ipv4', {})
trans_cfg['ipv6'] = exist_cfg.get('ipv6', {})
return trans_cfg
def reset_loop_backs(self):
self.loop_backs = ","
def update_loop_backs(self, loop_back):
self.loop_backs += "{Loopback},".format(Loopback=loop_back)
    def is_loop_back_already_exist(self, loop_back):
return(",{0},".format(loop_back) in self.loop_backs)
| 39.483871 | 152 | 0.587146 |
1f7d86def2f3f760566e7503d01ec197d66783ae | 591 | py | Python | encryptfinance/users/urls.py | dark-codr/encryptfinance | 573a8179c3a7c4b0f68d71bc9d461246f6fdba29 | ["Apache-2.0"] | null | null | null | encryptfinance/users/urls.py | dark-codr/encryptfinance | 573a8179c3a7c4b0f68d71bc9d461246f6fdba29 | ["Apache-2.0"] | null | null | null | encryptfinance/users/urls.py | dark-codr/encryptfinance | 573a8179c3a7c4b0f68d71bc9d461246f6fdba29 | ["Apache-2.0"] | null | null | null |
from __future__ import absolute_import
from django.urls import path
from encryptfinance.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
user_verify_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("<username>/~update/", view=user_update_view, name="update"),
path("<username>/", view=user_detail_view, name="detail"),
path("<username>/~verify/", view=user_verify_view, name="verify"),
# path("<str:username>/profile/", view=user_profile_view, name="profile"),
]
| 29.55 | 78 | 0.712352 |
6bc69ffd5cd3244a545448bb077bf7b59d2250cb | 602,306 | py | Python | python/paddle/fluid/layers/nn.py | Joejiong/Paddle | 6d6ea569dc1e9ff15fdc774c79276b0f79444f5e | ["Apache-2.0"] | null | null | null | python/paddle/fluid/layers/nn.py | Joejiong/Paddle | 6d6ea569dc1e9ff15fdc774c79276b0f79444f5e | ["Apache-2.0"] | null | null | null | python/paddle/fluid/layers/nn.py | Joejiong/Paddle | 6d6ea569dc1e9ff15fdc774c79276b0f79444f5e | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the neural network.
"""
from __future__ import print_function
import os
import inspect
import warnings
import numpy as np
import six
import paddle
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ...utils import deprecated
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from paddle.utils import deprecated
__all__ = [
'fc',
'embedding',
'linear_chain_crf',
'crf_decoding',
'cos_sim',
'chunk_eval',
'conv2d',
'conv3d',
'softmax',
'pool2d',
'pool3d',
'adaptive_pool2d',
'adaptive_pool3d',
'batch_norm',
'inplace_abn',
'instance_norm',
'data_norm',
'conv2d_transpose',
'conv3d_transpose',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'reduce_prod',
'reduce_all',
'reduce_any',
'dropout',
'split',
'ctc_greedy_decoder',
'l2_normalize',
'matmul',
'topk',
'transpose',
'im2sequence',
'row_conv',
'multiplex',
'layer_norm',
'group_norm',
'spectral_norm',
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'reshape',
'squeeze',
'unsqueeze',
'lod_reset',
'lod_append',
'lrn',
'pad',
'pad_constant_like',
'label_smooth',
'roi_pool',
'roi_align',
'dice_loss',
'image_resize',
'image_resize_short',
'resize_linear',
'resize_bilinear',
'resize_trilinear',
'resize_nearest',
'gather',
'gather_nd',
'scatter',
'scatter_nd_add',
'scatter_nd',
'random_crop',
'mean_iou',
'relu',
'selu',
'log',
'crop',
'crop_tensor',
'elu',
'relu6',
'pow',
'stanh',
'hard_sigmoid',
'swish',
'prelu',
'brelu',
'leaky_relu',
'soft_relu',
'flatten',
'stack',
'pad2d',
'unstack',
'unique',
'unique_with_counts',
'expand',
'expand_as',
'scale',
'elementwise_add',
'elementwise_div',
'elementwise_sub',
'elementwise_mul',
'elementwise_max',
'elementwise_min',
'elementwise_pow',
'elementwise_mod',
'elementwise_floordiv',
'uniform_random_batch_size_like',
'gaussian_random',
'sampling_id',
'gaussian_random_batch_size_like',
'sum',
'slice',
'strided_slice',
'shape',
'rank',
'size',
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
'clip',
'clip_by_norm',
'mean',
'mul',
'maxout',
'space_to_depth',
'affine_grid',
'affine_channel',
'similarity_focus',
'hash',
'grid_sampler',
'log_loss',
'add_position_encoding',
'bilinear_tensor_product',
'merge_selected_rows',
'get_tensor_from_selected_rows',
'shuffle_channel',
'temporal_shift',
'py_func',
'psroi_pool',
'prroi_pool',
'pixel_shuffle',
'fsp_matrix',
'continuous_value_model',
'where',
'sign',
'deformable_conv',
'unfold',
'deformable_roi_pooling',
'filter_by_instag',
'shard_index',
'hard_swish',
'mish',
'gather_tree',
'uniform_random',
'unbind',
]
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(core.ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def fc(input,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
name=None):
r"""
:api_attr: Static Graph
**Fully Connected Layer**
This operator creates a fully connected layer in the network. It can take
a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see
Args in detail). It creates a variable called weight for each input Tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input Tensor
with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` ,
where M is batch size. If a list of Tensor is given, the results of
multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr`
is not None, a bias variable will be created and added to the output.
Finally, if :attr:`act` is not None, it will be applied to the output as well.
When the input is a single Tensor(or LoDTensor):
.. math::
Out = Act({XW + b})
When the input is a list of Tensor(or LoDTensor):
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
* :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.
* :math:`X_i`: The i-th input tensor.
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output Tensor.
.. code-block:: text
Case 1:
Given a single Tensor data_1, and num_flatten_dims = 2:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)
Then output is:
out.data = [[0.83234344], [0.34936576]]
out.shape = (1, 2, 1)
Case 2:
Given a list of Tensor:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
data_2 = [[[0.1, 0.2, 0.3]]]
data_2.shape = (1, 1, 3)
out = fluid.layers.fc(input=[data_1, data_2], size=2)
Then:
out.data = [[0.18669507, 0.1893476]]
out.shape = (1, 2)
Args:
input (Variable|list of Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
a list of Tensor(or LoDTensor). The dimensions of the input Tensor is at least 2 and the data
type should be float32 or float64.
size(int): The number of output units in this layer, which also means the feature size of output
Tensor(or LoDTensor).
num_flatten_dims (int): The fc layer can accept an input Tensor with more than
two dimensions. If this happens, the multidimensional tensor will first be flattened
into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
form the second dimension of the final matrix (width of the matrix). For example, assuming that
X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Tensor or LoDTensor calculated by fc layer. The data type is same with input.
Raises:
ValueError: If dimensions of the input Tensor is less than 2.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# when input is single tensor
data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
# when input are multiple tensors
data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
"""
helper = LayerHelper("fc", **locals())
check_type(input, 'input', (list, tuple, Variable), 'fc')
if isinstance(input, (list, tuple)):
for i, input_x in enumerate(input):
check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
dtype = helper.input_dtype()
check_dtype(dtype, 'input', ['float16', 'float32', 'float64'], 'fc')
mul_results = []
for input_var, param_attr in helper.iter_inputs_and_params():
input_shape = input_var.shape
if num_flatten_dims == -1:
num_flatten_dims = len(input_shape) - 1
param_shape = [
reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
] + [size]
w = helper.create_parameter(
attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="mul",
inputs={"X": input_var,
"Y": w},
outputs={"Out": tmp},
attrs={"x_num_col_dims": num_flatten_dims,
"y_num_col_dims": 1})
mul_results.append(tmp)
if len(mul_results) == 1:
pre_bias = mul_results[0]
else:
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sum",
inputs={"X": mul_results},
outputs={"Out": pre_bias},
attrs={"use_mkldnn": False})
# add bias
pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
# add activation
return helper.append_activation(pre_activation)
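# Illustrative sketch (hypothetical helper, not part of the Paddle API above): the
# flattening that num_flatten_dims performs before the mul op, restated with NumPy so
# the shape arithmetic from the docstring example ([2, 3, 4, 5, 6] with
# num_flatten_dims=3 -> a [24, 30] matrix) can be checked directly.
def _sketch_fc_flatten_shape(shape, num_flatten_dims):
    import numpy as np
    height = int(np.prod(shape[:num_flatten_dims]))   # rows of the matmul input
    width = int(np.prod(shape[num_flatten_dims:]))    # columns of the matmul input
    return (height, width)
# _sketch_fc_flatten_shape((2, 3, 4, 5, 6), 3) -> (24, 30)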
@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(input,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
r"""
:api_attr: Static Graph
    **WARNING:** This OP will be deprecated in a future release. It requires that
    the last dimension of the Tensor shape be equal to 1. It is recommended to use
    fluid. :ref:`api_fluid_embedding` .
The operator is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
    This OP requires that the last dimension of the Tensor shape be equal to 1. The shape
of output Tensor is generated by replacing the last dimension of the input Tensor shape
with emb_size.
    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
input.shape = [3, 2, 1]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
The input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
It will pad all-zero data when ids is 127.
Case 2:
input is a LoDTensor with 1-level LoD. padding_idx = 0
input.lod = [[2, 3]]
input.data = [[1], [3], [2], [4], [0]]
input.shape = [5, 1]
Given size = [128, 16]
output is a LoDTensor:
out.lod = [[2, 3]]
out.shape = [5, 16]
out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654],
[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]] # padding data
It will pad all-zero data when ids is 0.
Args:
input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
The last dimension of Tensor shape must be equal to 1. The value of the input id should
satisfy :math:`0<= id < size[0]` .
size(tuple|list): The shape of lookup table parameter. It should have two elements which
indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
            True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(str|core.VarDesc.VarType): It refers to the data type of output Tensor.
It must be float32 or float64. Default: float32.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(name='x', shape=[None, 1], dtype='int64')
# example 1
emb_1 = fluid.embedding(input=data, size=[128, 64])
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
"""
helper = LayerHelper('embedding', **locals())
check_variable_and_dtype(input, 'input', ['int64'],
'fluid.layers.embedding')
check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
'fluid.layers.embedding')
if is_distributed:
is_distributed = False
warnings.warn(
"is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed"
)
remote_prefetch = True if is_sparse else False
w = helper.create_parameter(
attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
helper.append_op(
type='lookup_table',
inputs={'Ids': input,
'W': w},
outputs={'Out': tmp},
attrs={
'is_sparse': is_sparse,
'is_distributed': is_distributed,
'remote_prefetch': remote_prefetch,
'padding_idx': padding_idx
})
return tmp
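# Illustrative sketch (hypothetical helper, not part of the API above): how a negative
# padding_idx is normalised, mirroring the expression used in embedding() right above
# (padding_idx = size[0] + padding_idx when padding_idx < 0, and -1 when it is None).
def _sketch_normalize_padding_idx(padding_idx, vocab_size):
    if padding_idx is None:
        return -1
    return padding_idx if padding_idx >= 0 else vocab_size + padding_idx
# _sketch_normalize_padding_idx(-1, 128) -> 127, matching Case 1 of the docstring above.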
def _pull_sparse(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
r"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the fleet table id of this embedding.
accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_sparse_v2(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
r"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the pslib table id of this embedding.
accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse_v2(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse_v2',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_box_sparse(input,
size,
dtype='float32',
is_distributed=False,
is_sparse=False):
r"""
**Pull Box Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
BoxPS lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.pull_box_sparse(input=data, size=[11])
"""
helper = LayerHelper('pull_box_sparse', **locals())
if dtype != 'float32':
raise ValueError(
"BoxPS only support float type embedding now, and your type is: " +
dtype)
helper.input_dtype()
inputs = helper.multiple_input()
outs = [
helper.create_variable_for_type_inference(dtype)
for i in range(len(inputs))
]
w = helper.create_parameter(
attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False)
helper.append_op(
type='pull_box_sparse',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs={
'size': size,
'is_distributed': is_distributed,
'is_sparse': is_sparse
})
if len(outs) == 1:
return outs[0]
return outs
@templatedoc()
def linear_chain_crf(input, label, param_attr=None, length=None):
"""
:api_attr: Static Graph
Linear Chain CRF.
${comment}
Args:
input(${emission_type}): ${emission_comment}
label(${label_type}): ${label_comment}
        length(${length_type}): ${length_comment}
param_attr(ParamAttr): The attribute of the learnable parameter for transition parameter.
Returns:
output(${emission_exps_type}): ${emission_exps_comment} \n
output(${transition_exps_type}): ${transition_exps_comment} \n
output(${log_likelihood_type}): ${log_likelihood_comment} \n
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
#define net structure, using LodTensor
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
label = fluid.data(name='label', shape=[-1,1], dtype='int')
emission= fluid.layers.fc(input=input_data, size=10, act="tanh")
crf_cost = fluid.layers.linear_chain_crf(
input=emission,
label=label,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using LoDTensor
a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place)
b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place)
feed1 = {'input_data':a,'label':b}
loss= exe.run(train_program,feed=feed1, fetch_list=[crf_cost])
print(loss)
#define net structure, using padding
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32')
label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int')
label_length = fluid.data(name='length', shape=[-1,1], dtype='int')
emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2)
crf_cost2 = fluid.layers.linear_chain_crf(
input=emission2,
label=label2,
length=label_length,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using padding
cc=np.random.rand(4,10,10).astype('float32')
dd=np.random.rand(4,10,1).astype('int64')
ll=np.array([[3],[3],[4],[2]])
feed2 = {'input_data2':cc,'label2':dd,'length':ll}
loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2])
print(loss2)
#[array([[ 7.8902354],
# [ 7.3602567],
# [ 10.004011],
# [ 5.86721 ]], dtype=float32)]
#you can use find_var to get transition parameter.
transition=np.array(fluid.global_scope().find_var('crfw').get_tensor())
print(transition)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'linear_chain_crf')
check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
helper = LayerHelper('linear_chain_crf', **locals())
size = input.shape[2] if length else input.shape[1]
transition = helper.create_parameter(
attr=helper.param_attr,
shape=[size + 2, size],
dtype=helper.input_dtype())
alpha = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
emission_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
transition_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
log_likelihood = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
this_inputs = {
"Emission": [input],
"Transition": transition,
"Label": [label]
}
if length:
this_inputs['Length'] = [length]
helper.append_op(
type='linear_chain_crf',
inputs=this_inputs,
outputs={
"Alpha": [alpha],
"EmissionExps": [emission_exps],
"TransitionExps": transition_exps,
"LogLikelihood": log_likelihood
})
return log_likelihood
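# Illustrative sketch (hypothetical helper): the shape bookkeeping for the transition
# parameter created above. With a tag set of size D the parameter is [D + 2, D]; the two
# extra rows are conventionally the start/end transition weights and the remaining D x D
# block holds tag-to-tag transitions (the exact row layout is an assumption taken from
# the linear_chain_crf op description, not from this file).
def _sketch_crf_transition_shape(num_tags):
    return (num_tags + 2, num_tags)
# _sketch_crf_transition_shape(10) -> (12, 10); compare the shape of the 'crfw'
# parameter fetched via find_var in the example code above.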
@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
"""
:api_attr: Static Graph
${comment}
Args:
input(Tensor): ${emission_comment}
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_paddle_fluid_param_attr_ParamAttr` .
label(${label_type}, optional): ${label_comment}
length(${length_type}, optional): ${length_comment}
Returns:
Tensor: ${viterbi_path_comment}
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
# LoDTensor-based example
num_labels = 10
feature = paddle.static.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
emission = paddle.static.nn.fc(feature, size=num_labels)
crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label,
param_attr=paddle.ParamAttr(name="crfw"))
crf_decode = paddle.static.nn.crf_decoding(input=emission,
param_attr=paddle.ParamAttr(name="crfw"))
# Common tensor example
num_labels, max_len = 10, 20
feature = paddle.static.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
label = paddle.static.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
length = paddle.static.data(name='length', shape=[-1, 1], dtype='int64')
emission = paddle.static.nn.fc(feature, size=num_labels,
num_flatten_dims=2)
crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
param_attr=paddle.ParamAttr(name="crfw_pad"))
crf_decode = paddle.static.nn.crf_decoding(input=emission, length=length,
param_attr=paddle.ParamAttr(name="crfw_pad"))
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'crf_decoding')
helper = LayerHelper('crf_decoding', **locals())
transition = helper.get_parameter(param_attr.name)
viterbi_path = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
inputs = {"Emission": [input], "Transition": transition, "Label": label}
if length:
inputs['Length'] = length
helper.append_op(
type='crf_decoding',
inputs=inputs,
outputs={"ViterbiPath": [viterbi_path]})
return viterbi_path
@templatedoc()
def cos_sim(X, Y):
"""
${comment}
Args:
X (Tensor): ${x_comment}.
Y (Tensor): ${y_comment}.
Returns:
A Tensor representing the output of cosine(X, Y).
Examples:
.. code-block:: python
import paddle
x = paddle.rand(shape=[3, 7], dtype='float32')
y = paddle.rand(shape=[1, 7], dtype='float32')
out = paddle.fluid.layers.cos_sim(x, y)
print(out)
"""
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
helper = LayerHelper('cos_sim', **locals())
out = helper.create_variable_for_type_inference(dtype=X.dtype)
xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
helper.append_op(
type='cos_sim',
inputs={'X': [X],
'Y': [Y]},
outputs={'Out': [out],
'XNorm': [xnorm],
'YNorm': [ynorm]})
return out
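# Illustrative NumPy sketch (hypothetical helper, not the operator itself): row-wise
# cosine similarity as computed above, assuming Y either has the same shape as X or has
# a single row that is broadcast against every row of X (the shapes in the docstring
# example: X [3, 7], Y [1, 7]).
def _sketch_cos_sim(x, y):
    import numpy as np
    x = np.asarray(x, dtype='float32')
    y = np.broadcast_to(np.asarray(y, dtype='float32'), x.shape)
    num = (x * y).sum(axis=1)                                   # row-wise dot products
    den = np.linalg.norm(x, axis=1) * np.linalg.norm(y, axis=1)  # product of row norms
    return num / den
# _sketch_cos_sim([[1., 0.], [0., 1.]], [[1., 0.]]) -> [1.0, 0.0]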
@deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout")
def dropout(x,
dropout_prob,
is_test=None,
seed=None,
name=None,
dropout_implementation="downgrade_in_infer"):
"""
Computes dropout.
Drop or keep each element of `x` independently. Dropout is a regularization
    technique for reducing overfitting by preventing neuron co-adaptation during
    training. The dropout operator randomly sets (according to the given dropout
    probability) the outputs of some units to zero, while the others remain
    unchanged.
dropout op can be removed from the program to make the program more efficient.
Args:
x (Variable): The input tensor variable. The data type is float16 or float32 or float64.
dropout_prob (float): Probability of setting units to zero.
        is_test (bool): A flag indicating whether it is in test phase or not.
                        Default None; in dynamic graph it uses the global tracer mode, while in static graph it means False.
seed (int): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
                    NOTE: If an integer seed is given, the same output
                    units will always be dropped. DO NOT use a fixed seed in training. Default: None.
name (str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - dropout_prob)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - dropout_prob )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
Returns:
A Variable holding Tensor representing the dropout, has same shape and data type with `x`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
dropped = fluid.layers.dropout(x, dropout_prob=0.5)
"""
# fast return for p == 0
if dropout_prob == 0:
return x
def get_attrs(prog, dropout_prob, is_test, seed):
if (seed is None or seed == 0) and prog.random_seed != 0:
seed = prog.random_seed
attrs = {
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': dropout_implementation,
}
return attrs
if in_dygraph_mode():
if (seed is None or
seed == 0) and default_main_program().random_seed != 0:
seed = default_main_program().random_seed
if is_test is None:
is_test = not _dygraph_tracer()._train_mode
out, mask = core.ops.dropout(
x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed',
seed is not None, 'seed', seed if seed is not None else 0,
'dropout_implementation', dropout_implementation)
return out
helper = LayerHelper('dropout', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'dropout')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
mask = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)
helper.append_op(
type='dropout',
inputs={'X': [x]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
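# Illustrative NumPy sketch (hypothetical helper): the two dropout_implementation modes
# described in the docstring above. 'downgrade_in_infer' keeps training outputs as
# input * mask and scales by (1 - p) at inference; 'upscale_in_train' rescales during
# training by 1 / (1 - p) and is the identity at inference.
def _sketch_dropout(x, mask, dropout_prob, is_test, implementation):
    import numpy as np
    x = np.asarray(x, dtype='float32')
    mask = np.asarray(mask, dtype='float32')  # 0/1 tensor with roughly dropout_prob zeros
    if implementation == 'upscale_in_train':
        return x if is_test else x * mask / (1.0 - dropout_prob)
    # 'downgrade_in_infer' (the default)
    return x * (1.0 - dropout_prob) if is_test else x * mask
# _sketch_dropout([2.0, 2.0], [1.0, 0.0], 0.5, False, 'upscale_in_train') -> [4., 0.]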
@templatedoc()
def chunk_eval(input,
label,
chunk_scheme,
num_chunk_types,
excluded_chunk_types=None,
seq_length=None):
r"""
This operator computes the precision, recall and F1-score for chunk detection.
It is often used in sequence tagging tasks, such as Named Entity Recognition(NER).
For some basics of chunking, please refer to
`Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
This operator supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Here is a NER example for the usage of these tagging schemes:
.. code-block:: python
====== ====== ====== ===== == ============ ===== ===== ===== == =========
Li Ming works at Agricultural Bank of China in Beijing.
====== ====== ====== ===== == ============ ===== ===== ===== == =========
IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC
IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC
IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC
IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC
====== ====== ====== ===== == ============ ===== ===== ===== == =========
There are three chunk types(named entity types) including PER(person), ORG(organization)
and LOC(location), and we can see that the labels have the form `<tag type>-<chunk type>` .
Since the implementation of this operator actually uses label ids rather than
label strings, to make it work, there should be a way to map label ids to
tag types and chunk types. This operator uses the following way to do mapping:
.. code-block:: python
tag_type = label % num_tag_type
chunk_type = label / num_tag_type
where `num_tag_type` is the num of tag types in the tagging scheme, `num_chunk_type`
is the num of chunk types, and `tag_type` get its value from the following table.
.. code-block:: python
Scheme Begin Inside End Single
plain 0 - - -
IOB 0 1 - -
IOE - 0 1 -
IOBES 0 1 2 3
Accordingly, in the above NER example, if the tagging scheme is IOB and chunk
types are ORG, PER and LOC, then the label ids would be as follows:
.. code-block:: python
B-ORG 0
I-ORG 1
B-PER 2
I-PER 3
B-LOC 4
I-LOC 5
O 6
With which we can map each label id to the corresponding tag type and chunk
type correctly.
Args:
input (Tensor): A Tensor representing the predicted labels
from the network. Its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length.
The data type should be int64.
label (Tensor): A Tensor representing the ground-truth labels.
It should have the same shape, lod and data type as ``input`` .
chunk_scheme (str): Indicate the tagging schemes used here. The value must
be IOB, IOE, IOBES or plain.
num_chunk_types (int): The number of chunk types.
excluded_chunk_types (list, optional): Indicate the chunk types shouldn't
be taken into account. It should be a list of chunk type ids(integer).
Default None.
seq_length(Tensor, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. Default None.
Returns:
tuple: A tuple including precision, recall, F1-score, chunk number detected, \
chunk number in ground-truth, chunk number correctly detected. Each \
is a Tensor with shape `[1]`. The data type of precision, recall and \
F1-score all is float32, and the others' data type all is int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dict_size = 10000
label_dict_len = 7
sequence = fluid.data(
name='id', shape=[None, 1], lod_level=1, dtype='int64')
embedding = fluid.embedding(
input=sequence, size=[dict_size, 512])
hidden = fluid.layers.fc(input=embedding, size=512)
label = fluid.data(
name='label', shape=[None, 1], lod_level=1, dtype='int64')
crf = fluid.layers.linear_chain_crf(
input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(
input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
fluid.layers.chunk_eval(
input=crf_decode,
label=label,
chunk_scheme="IOB",
num_chunk_types=int((label_dict_len - 1) / 2))
"""
helper = LayerHelper("chunk_eval", **locals())
check_variable_and_dtype(input, 'input', ['int64'], 'chunk_eval')
check_variable_and_dtype(label, 'label', ['int64'], 'chunk_eval')
# prepare output
precision = helper.create_variable_for_type_inference(dtype="float32")
recall = helper.create_variable_for_type_inference(dtype="float32")
f1_score = helper.create_variable_for_type_inference(dtype="float32")
num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_correct_chunks = helper.create_variable_for_type_inference(
dtype="int64")
this_input = {"Inference": [input], "Label": [label]}
if seq_length is not None:
this_input["SeqLength"] = [seq_length]
helper.append_op(
type="chunk_eval",
inputs=this_input,
outputs={
"Precision": [precision],
"Recall": [recall],
"F1-Score": [f1_score],
"NumInferChunks": [num_infer_chunks],
"NumLabelChunks": [num_label_chunks],
"NumCorrectChunks": [num_correct_chunks]
},
attrs={
"num_chunk_types": num_chunk_types,
"chunk_scheme": chunk_scheme,
"excluded_chunk_types": excluded_chunk_types or []
})
return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks)
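# Illustrative sketch (hypothetical helper): the label-id decoding described in the
# docstring above, tag_type = label % num_tag_types and chunk_type = label // num_tag_types.
# For the IOB scheme num_tag_types is 2 (Begin, Inside), so with chunk types
# [ORG, PER, LOC] the id 3 decodes to (Inside, PER), matching the I-PER row in the table.
def _sketch_decode_chunk_label(label_id, num_tag_types=2,
                               tag_names=('Begin', 'Inside'),
                               chunk_names=('ORG', 'PER', 'LOC')):
    tag_type = label_id % num_tag_types
    chunk_type = label_id // num_tag_types
    return tag_names[tag_type], chunk_names[chunk_type]
# _sketch_decode_chunk_label(3) -> ('Inside', 'PER')
# _sketch_decode_chunk_label(4) -> ('Begin', 'LOC')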
@deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax")
def softmax(input, use_cudnn=True, name=None, axis=-1):
r"""
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension(row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension(column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional(K is the width of the matrix, which is also the size
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
exponential values of all the other dimensions is the output of the softmax
operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))}
Example:
.. code-block:: text
Case 1:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Args:
input (Tensor): The input tensor. A multi-dimension ``Tensor`` with type float32 or float64.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. To improve performance, set use_cudnn to True by default.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .
axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input tensor. Default: -1. -1 means the last dimension.
Returns:
Tensor: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], dtype='float32')
y = F.softmax(x, axis=1)
print(y)
# [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
# [0.01786798, 0.01786798, 0.04661262, 0.04661262],
# [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
# [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
# [0.26762316, 0.26762316, 0.26762316, 0.26762316],
# [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
"""
if in_dygraph_mode():
return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)
inputs = {"X": [input]}
attrs = {"axis": axis, "use_cudnn": use_cudnn}
helper = LayerHelper('softmax', **locals())
check_variable_and_dtype(input, 'input/x',
['float16', 'float32', 'float64'], 'softmax')
dtype = helper.input_dtype()
softmax_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="softmax",
inputs={"X": input},
outputs={"Out": softmax_out},
attrs=attrs)
return softmax_out
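# Illustrative NumPy sketch (hypothetical helper): softmax along an arbitrary axis as
# described above -- exponentiate, then normalise by the sum over that axis. Applied to
# a column of the docstring's Case 2 input with axis=1 it reproduces values such as
# exp(2) / (exp(2) + exp(3) + exp(7)) ~= 0.00657.
def _sketch_softmax(x, axis=-1):
    import numpy as np
    x = np.asarray(x, dtype='float64')
    shifted = x - x.max(axis=axis, keepdims=True)   # subtract max for numerical stability
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)
# _sketch_softmax([[2.0, 3.0, 7.0]], axis=1)[0, 0] -> ~0.00657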
def conv2d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCHW"):
r"""
:api_attr: Static Graph
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW or NHWC format, where N is batch size, C is the number of
channels, H is the height of the feature, and W is the width of the feature.
Filter is in MCHW format, where M is the number of output image channels,
C is the number of input image channels, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input image channels divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Tensor): The input is 4-D Tensor with shape [N, C, H, W], the data type
of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
filter_size (int|tuple): The filter size. If filter_size
is a tuple, it must contain two integers, (filter_size_height,
filter_size_width). Otherwise, filter_size_height = filter_size_width =\
filter_size.
stride (int|tuple): The stride size. It means the stride in convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension.If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
`data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
[pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel
points. If dilation is a tuple, it must contain two integers, (dilation_height,
dilation_width). Otherwise, dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None
        name(str|None): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually there is no need to set name;
           it is None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Tensor representing the conv2d, whose data type is the
same with input. If act is None, the tensor storing the convolution
result, and if act is not None, the tensor storing convolution
and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
ShapeError: If the number of output channels is not be divided by groups.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
print(conv2d.shape) # [-1, 2, 30, 30]
"""
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'conv2d')
num_channels = input.shape[1]
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NHWC")
num_channels = input.shape[3] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimmention of the input(%s) should be defined. "
"Received: %s." % (str(input.shape), str(num_channels)))
assert param_attr is not False, "param_attr should not be False here."
l_type = 'conv2d'
if (num_channels == groups and num_filters % num_channels == 0 and
not use_cudnn):
l_type = 'depthwise_conv2d'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
"received: the channel of input is {}, the shape of input is {}"
", the groups is {}".format(num_channels, input.shape, groups))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
# padding
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0]
padding = _update_padding(padding, data_format)
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if (core.is_compiled_with_cuda() and paddle.fluid.get_flags(
"FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]):
use_cudnn = False
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
'fuse_relu_before_depthwise_conv': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
return helper.append_activation(pre_act)
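# Illustrative sketch (hypothetical helper): the output-size formula quoted in the
# conv2d docstring, H_out = (H_in + 2*pad - (dilation*(K-1) + 1)) // stride + 1.
# With the docstring example (32x32 input, 3x3 filter, stride 1, padding 0, dilation 1)
# it gives 30, matching the printed shape [-1, 2, 30, 30].
def _sketch_conv2d_out_size(in_size, filter_size, stride=1, padding=0, dilation=1):
    return (in_size + 2 * padding - (dilation * (filter_size - 1) + 1)) // stride + 1
# _sketch_conv2d_out_size(32, 3) -> 30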
def conv3d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCDHW"):
r"""
:api_attr: Static Graph
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
    but adds one dimension (depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Args:
input (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
type of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size.
stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
        A Variable holding a Tensor representing the conv3d result, whose data type is
        the same as the input. If act is None, the tensor variable stores the
        convolution result; if act is not None, it stores the result of the
        convolution followed by the activation.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not divisible by groups.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
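            # Illustrative addition (not in the original example): with the shapes and
            # default stride/padding/dilation above, the output-shape formula in this
            # docstring gives (12 - 3) + 1 = 10 and (32 - 3) + 1 = 30, so the fetched
            # result is expected to have shape (1, 2, 10, 30, 30).
            print(output[0].shape)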
"""
l_type = 'conv3d'
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NDHWC")
num_channels = input.shape[4] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimmention of the input(%s) should be defined. "
"Received: %s." % (str(input.shape), str(num_channels)))
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)." %
(str(num_channels), str(groups)))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0]
padding = _update_padding(padding, data_format)
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCDHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
return helper.append_activation(pre_act)
@templatedoc()
def pool2d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
`"NHWC"`, where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
            feature. The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int.
pool_type: ${pooling_type_comment}
pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width).
Otherwise, the pool stride size will be a square of an int.
pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`,
`pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Otherwise, the pool padding size will be a square of an int.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is `true`.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
ShapeError: If the input is not a 4-D or 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
# max pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW".
out_1 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0],
data_format = "NCHW")
# Attr(pool_padding) is a string, Attr(data_format) is "NCHW".
out_2 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
data_format = "NCHW")
"""
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received pool_size: %s." % str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s." % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@templatedoc()
def pool3d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCDHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of
input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is
the number of channels, `D` is the depth of the feature,
`H` is the height of the feature, and `W` is the width
of the feature.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size
is a tuple or list, it must contain three integers,
(pool_size_Depth, pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
pool_type (string): ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`.
            Otherwise, the pool stride size will be a cube of an int.
pool_padding (int|list|tuple): The pool padding size. If pool padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is true.
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
        ShapeError: If the input is not a 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
# max pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# example 1:
# Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
out_1 = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0, 1, 2],
global_pooling = False,
data_format = "NCDHW")
# example 2:
# Attr(pool_padding) is a string, Attr(data_format) is "NCDHW".
out_2 = fluid.layers.pool3d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
global_pooling = False,
data_format = "NCDHW")
"""
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received Attr(pool_size): %s." %
str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s" % str(data_format))
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride')
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, (list, tuple)):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", ceil_mode must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@deprecated(since="2.0.0")
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
r"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature. Parameters(pool_size) should contain two elements which
    represent height and width, respectively. Also the H and W dimensions of output(Out)
    are the same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]].
For average adaptive pool2d:
.. math::
hstart &= floor(i * H_{in} / H_{out})
hend &= ceil((i + 1) * H_{in} / H_{out})
wstart &= floor(j * W_{in} / W_{out})
wend &= ceil((j + 1) * W_{in} / W_{out})
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Args:
input (Tensor): The input tensor of pooling operator, which is a 4-D tensor
with shape [N, C, H, W]. The format of input tensor is NCHW,
where N is batch size, C is the number of channels, H is the
height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of adaptive pooling result. The data type is same
as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
ValueError: 'pool_size' should be a list or tuple with length as 2.
Examples:
.. code-block:: python
# average adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(i * W / n)
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
paddle.enable_static()
data = paddle.rand(shape=[1,3,32,32])
pool_out = paddle.fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='avg')
# max adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
            # adaptive max pool performs calculations as follows:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(i * W / n)
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle
data = paddle.rand(shape=[1,3,32,32])
pool_out = paddle.fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='max')
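            # Illustrative sketch (not in the original example): with pool_type='max'
            # and require_index=True the call returns an (output, mask) pair; both
            # have shape [1, 3, 3, 3] here.
            pool_out, mask = paddle.fluid.layers.adaptive_pool2d(
                input=data,
                pool_size=[3, 3],
                pool_type='max',
                require_index=True)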
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool2d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
if pool_type == "max":
l_type = 'max_pool2d_with_index'
else:
l_type = "pool2d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
@deprecated(since="2.0.0")
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
r"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature. Parameters(pool_size) should contain
three elements which represent height and width, respectively. Also the D, H and W
dimensions of output(Out) is same as Parameter(pool_size). The output tensor shape
will be [N, C, pool_size[0], pool_size[1], pool_size[2]]
For average adaptive pool3d:
.. math::
dstart &= floor(i * D_{in} / D_{out})
dend &= ceil((i + 1) * D_{in} / D_{out})
hstart &= floor(j * H_{in} / H_{out})
hend &= ceil((j + 1) * H_{in} / H_{out})
wstart &= floor(k * W_{in} / W_{out})
wend &= ceil((k + 1) * W_{in} / W_{out})
Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
Args:
input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
N is batch size, C is the number of channels, D is the depth of the feature,
H is the height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain three integers, (Depth, Height, Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 3.
Examples:
.. code-block:: python
# average adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle
paddle.enable_static()
data = paddle.rand(shape=[1,3,32,32,32])
pool_out = paddle.fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='avg')
# max adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
            # adaptive max pool performs calculations as follows:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
            #     max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle
data = paddle.rand(shape=[1,3,32,32,32])
pool_out = paddle.fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='max')
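            # Illustrative sketch (not in the original example): with pool_type='max'
            # and require_index=True the argmax mask is returned as well; both outputs
            # have shape [1, 3, 3, 3, 3] here.
            pool_out, mask = paddle.fluid.layers.adaptive_pool3d(
                input=data,
                pool_size=[3, 3, 3],
                pool_type='max',
                require_index=True)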
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool3d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
if pool_type == "max":
l_type = 'max_pool3d_with_index'
else:
l_type = "pool3d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
def batch_norm(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False):
r"""
:api_attr: Static Graph
**Batch Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\
moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)
moving_mean is global mean and moving_var is global variance.
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global (or running) statistics, usually obtained from the
    pre-trained model.
The training and testing (or inference) have the same behavior:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta
Note:
        If build_strategy.sync_batch_norm=True, the batch_norm in the network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test and inference programs; `is_test` CANNOT be set to True in a train program. If you want to use the global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
momentum(float|Tensor, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Tensor with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which stores the global Mean. If it
            is set to None, batch_norm will save the global mean with a random name; otherwise, batch_norm
            will save the global mean under the given string.
        moving_variance_name(str, Default None): The name of the moving_variance which stores the global Variance.
            If it is set to None, batch_norm will save the global variance with a random name; otherwise, batch_norm
            will save the global variance under the given string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
Returns:
A Tensor which is the result after applying batch normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x=x, size=200)
print(hidden1.shape)
# [3, 200]
hidden2 = paddle.static.nn.batch_norm(input=hidden1)
print(hidden2.shape)
# [3, 200]
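            # Illustrative sketch (not in the original example): batch_norm is
            # typically placed after a convolution; `conv` and `bn` are hypothetical
            # names, and the activation is applied after the normalization.
            conv = paddle.static.nn.conv2d(input=x, num_filters=16, filter_size=3)
            bn = paddle.static.nn.batch_norm(input=conv, act='relu')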
"""
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'batch_norm')
dtype = helper.input_dtype()
# use fp32 for bn parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance_out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if not is_test:
reserve_space = helper.create_variable_for_type_inference(
dtype=helper.input_dtype(), stop_gradient=True)
batch_norm_out = input if in_place else \
helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return helper.append_activation(batch_norm_out)
def inplace_abn(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
act_alpha=1.0):
r"""
**In-place Activation Batch Normalization Layer**
This layer calculates batch normalization and activation with in-place memory.
For batch normalization calculations, see `fluid.layers.batch_norm`.
For in-place activation batch normalization, see `In-Place Activated BatchNorm for
Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_
`inplace_abn` only support activation type as `None`, `identity`, `leaky_relu`,
`elu` currently.
`inplace_abn` only support data type as `float32`, `float64` currently.
Note:
        If build_strategy.sync_batch_norm=True, the batch_norm in the network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test and inference programs; `is_test` CANNOT be set to True in a train program. If you want to use the global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which stores the global Mean. If it
            is set to None, inplace_abn will save the global mean with a random name; otherwise, inplace_abn
            will save the global mean under the given string.
        moving_variance_name(str, Default None): The name of the moving_variance which stores the global Variance.
            If it is set to None, inplace_abn will save the global variance with a random name; otherwise, inplace_abn
            will save the global variance under the given string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
        act_alpha(float, Default 1.0): When the activation is one of ['elu', 'identity', 'leaky_relu'],
            in-place activated batch normalization will be used, and the alpha parameter for the
            activation can be given by this parameter.
Returns:
A Variable holding Tensor which is the result after applying batch normalization and activation on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.inplace_abn(input=hidden1)
hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)
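            # Illustrative sketch (not in the original example): 'elu' is also among
            # the supported activations, and act_alpha then gives the elu alpha.
            hidden4 = fluid.layers.inplace_abn(input=hidden3, act='elu', act_alpha=1.0)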
"""
assert act in [None, 'identity', 'leaky_relu', 'elu'], \
"inplace_abn only support act as None, 'identity', " \
"'leaky_relu', 'elu' currently"
assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
helper = LayerHelper('inplace_abn', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'inplace_abn')
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
    # variance and variance_out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
batch_norm_out = input
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
"activation": act,
"alpha": act_alpha,
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)
return batch_norm_out
def instance_norm(input,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
name=None):
r"""
:api_attr: Static Graph
**Instance Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for
Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Args:
input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
The data type is float32 or float64.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
Default: None.
bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
If the bias_attr is set to False, instance_norm will not create bias_attr.
Default: None.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
A Tensor which is the result after applying instance normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x, size=200)
hidden2 = paddle.static.nn.instance_norm(hidden1)
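            # Illustrative sketch (not in the original example): instance_norm is more
            # commonly applied to 4-D feature maps, e.g. right after a convolution;
            # `conv` and `out` are hypothetical names.
            conv = paddle.static.nn.conv2d(input=x, num_filters=16, filter_size=3)
            out = paddle.static.nn.instance_norm(conv)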
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'instance_norm')
if param_attr is False:
        assert bias_attr is False, "param_attr and bias_attr must both be set to False at the same time in instance_norm"
helper = LayerHelper('instance_norm', **locals())
dtype = helper.input_dtype()
# use fp32 for in parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
channel_num = input_shape[1]
param_shape = [channel_num]
if param_attr != False and bias_attr != False:
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr,
shape=param_shape,
dtype=dtype,
is_bias=True,
default_initializer=Constant(0.0))
# create output
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
instance_norm_out = helper.create_variable_for_type_inference(dtype)
inputs = {"X": input}
if param_attr != False and bias_attr != False:
inputs["Scale"] = scale
inputs["Bias"] = bias
helper.append_op(
type="instance_norm",
inputs=inputs,
outputs={
"Y": instance_norm_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
},
attrs={"epsilon": epsilon, })
return instance_norm_out
@static_only
def data_norm(input,
act=None,
epsilon=1e-05,
param_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
slot_dim=-1,
sync_stats=False,
summary_decay_rate=0.9999999,
enable_scale_and_shift=False):
r"""
:api_attr: Static Graph
**Data Normalization Layer**
This op can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Args:
input(Tensor): The input Tensor.
act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05): A value added to the denominator for numerical stability.
param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
        moving_mean_name(string, Default None): The name of moving_mean which stores the global Mean.
        moving_variance_name(string, Default None): The name of the moving_variance which stores the global Variance.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
should do model average when model average is enabled.
slot_dim(int): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode, we
distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
            place of the embedding is the historical show number (the occurrence count of this feature id with label 0).
If the input of this op is concated by slot-wise embeddings, and the show number is zero when this slot
is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
the show number and judge if the show number is zero. If so, we choose to skip normalization on this
embedding.
sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
summary messages.
summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
enable_scale_and_shift(bool, Default False): do scale&shift after normalization.
Returns:
Tensor: A tensor which is the result after applying data normalization on the input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.randn(shape=[32,100])
hidden2 = paddle.static.nn.data_norm(input=x)
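            # Illustrative sketch (not in the original example): the optional
            # scale-and-shift after normalization can be enabled explicitly;
            # `hidden3` is a hypothetical name and a distinct `name` avoids
            # parameter-name clashes with the first call.
            hidden3 = paddle.static.nn.data_norm(
                input=x, name="dn2", enable_scale_and_shift=True)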
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
batch_size_default = 1e4
batch_sum_default = 0.0
batch_square_sum_default = 1e4
scale_w_default = 1.0
bias_default = 0.0
if param_attr and isinstance(param_attr, dict):
batch_size_default = param_attr.get("batch_size", 1e4)
batch_sum_default = param_attr.get("batch_sum", 0.0)
batch_square_sum_default = param_attr.get("batch_square", 1e4)
if enable_scale_and_shift:
scale_w_default = param_attr.get("scale_w", 1.0)
bias_default = param_attr.get("bias", 0.0)
# create scale and shift(bias) when enable_scale_and_shift is True
    if name is None:
name = "dn"
if enable_scale_and_shift:
scale_w = helper.create_parameter(
attr=ParamAttr(
name=name + '.scale_w',
initializer=Constant(value=float(scale_w_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
bias = helper.create_parameter(
attr=ParamAttr(
name=name + '.bias',
initializer=Constant(value=float(bias_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
# create parameter
batch_size = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_size',
initializer=Constant(value=float(batch_size_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_sum',
initializer=Constant(value=float(batch_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_square_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_square_sum',
initializer=Constant(value=float(batch_square_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
means = helper.create_variable(dtype=dtype, stop_gradient=True)
scales = helper.create_variable(dtype=dtype, stop_gradient=True)
data_norm_out = input if in_place else helper.create_variable(dtype=dtype)
inputs = {
"X": input,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
}
attrs = {
"epsilon": epsilon,
"sync_stats": sync_stats,
"summary_decay_rate": summary_decay_rate,
}
if slot_dim > 0:
attrs["slot_dim"] = slot_dim
if enable_scale_and_shift:
attrs["enable_scale_and_shift"] = enable_scale_and_shift
if enable_scale_and_shift:
inputs["scale_w"] = scale_w
inputs["bias"] = bias
helper.append_op(
type="data_norm",
inputs=inputs,
outputs={
"Y": data_norm_out,
"Means": means,
"Scales": scales,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
attrs=attrs)
return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
scale=True,
shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
name=None):
r"""
:api_attr: Static Graph
**Layer Normalization Layer**
The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
- :math:`H`: the number of hidden units in a layer
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Args:
input(Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
Default: 1.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
name(str): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
print(output.shape) # [8, 32, 32]
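# A hedged sketch: normalize only over the last dimension by raising
# begin_norm_axis; the output shape is unchanged.
output2 = paddle.static.nn.layer_norm(input=x, begin_norm_axis=2)
print(output2.shape) # [8, 32, 32]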
"""
assert in_dygraph_mode(
) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
helper = LayerHelper('layer_norm', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'layer_norm')
dtype = helper.input_dtype()
# create input and parameters
inputs = {'X': input}
input_shape = input.shape
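# The learnable scale/bias have one element per normalized entry, i.e. the
# product of the dims from begin_norm_axis onward.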
param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
if scale:
assert param_attr is not False, "param_attr should not be False when using scale."
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
else:
if param_attr:
warnings.warn("param_attr is only available with scale is True.")
if shift:
assert bias_attr is not False, "bias_attr should not be False when using shift."
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
else:
if bias_attr:
warnings.warn("bias_attr is only available with shift is True.")
# create output
mean_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
layer_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": epsilon,
"begin_norm_axis": begin_norm_axis})
return helper.append_activation(layer_norm_out)
@templatedoc()
def group_norm(input,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
name=None):
"""
:api_attr: Static Graph
**Group Normalization Layer**
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
input(Tensor): 4-D Tensor, the data type is float32 or float64.
groups(int): The number of groups that divided from channels, the data type
is int32.
epsilon(float, optional): The small value added to the variance to prevent
division by zero, the data type is float32. Default: 1e-05.
param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
attribute. If a bool type, only False is supported, which means there is no weight parameter.
Default: None, the default weight parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
attribute. If a bool type, only False is supported, which means there is no bias parameter.
Default: None, the default bias parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
act(str, optional): Activation to be applied to the output of group normalization.
data_layout(str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name (str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A 4-D Tensor has same data type and data format with `input`.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
x = paddle.static.nn.group_norm(input=data, groups=4)
print(x.shape) # [2, 8, 32, 32]
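# A hedged sketch: channels-last input; groups must still divide the channel
# dimension (8 here).
data_nhwc = paddle.static.data(name='data_nhwc', shape=[2, 32, 32, 8], dtype='float32')
y = paddle.static.nn.group_norm(input=data_nhwc, groups=4, data_layout='NHWC')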
"""
helper = LayerHelper('group_norm', **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'group_norm')
# create input and parameters
inputs = {'X': input}
input_shape = input.shape
if data_layout != 'NCHW' and data_layout != 'NHWC':
raise ValueError(
"Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
+ data_layout + " but only NCHW or NHWC supported.")
channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
param_shape = [channel_num]
if param_attr:
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
if bias_attr:
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
# create output
mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_variable(dtype=dtype)
helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": epsilon,
"groups": groups,
"data_layout": data_layout
})
return helper.append_activation(group_norm_out)
@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
r"""
:api_attr: Static Graph
**Spectral Normalization Layer**
This operation calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. The output tensor will have the same shape as the input tensor.
Calculations are showed as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
Here H is the size of the :attr:`dim`-th dimension of the input weights,
and W is the product of the remaining dimensions.
Step 2:
:attr:`power_iters` should be a positive integer. Do the following
calculations with U and V for :attr:`power_iters` rounds:
.. math::
\mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
\mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Args:
weight(Tensor): ${weight_comment}
dim(int): ${dim_comment}
power_iters(int): ${power_iters_comment}
eps(float): ${eps_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: A tensor of weight parameters after spectral normalization.
The data type and shape is same as input tensor.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
print(x.shape) # [2, 8, 32, 32]
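# A hedged sketch: spectral-normalizing a 2-D fully-connected weight with the
# default dim=0 and a single power iteration.
fc_w = paddle.static.data(name='fc_w', shape=[64, 128], dtype='float32')
fc_w_sn = paddle.static.nn.spectral_norm(weight=fc_w)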
"""
helper = LayerHelper('spectral_norm', **locals())
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
'spectral_norm')
check_type(dim, 'dim', int, 'spectral_norm')
check_type(power_iters, 'power_iters', int, 'spectral_norm')
check_type(eps, 'eps', float, 'spectral_norm')
dtype = weight.dtype
# create input and parameters
inputs = {'Weight': weight}
input_shape = weight.shape
h = input_shape[dim]
w = np.prod(input_shape) // h
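# u and v hold the running estimates of the left/right singular vectors used
# by power iteration; they are stored as parameters but excluded from gradient
# updates via stop_gradient.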
u = helper.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=dtype,
default_initializer=Normal(0., 1.))
u.stop_gradient = True
inputs['U'] = u
v = helper.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=dtype,
default_initializer=Normal(0., 1.))
inputs['V'] = v
v.stop_gradient = True
# create output
out = helper.create_variable(dtype=dtype)
helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": dim,
"power_iters": power_iters,
"eps": eps,
})
return out
def conv2d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCHW'):
r"""
:api_attr: Static Graph
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
represent height and width, respectively. The details of convolution transpose
layer, please refer to the following explanation and references
`therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a 4-D Tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]
Note:
The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
when stride > 1, conv2d maps multiple input shapes to the same output shape,
so for conv2d_transpose, when stride > 1, one input shape maps to multiple output shapes.
If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
else, the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must be
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`,
conv2d_transpose can compute the kernel size automatically.
Args:
input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
its data type is float32 or float64.
num_filters(int): The number of filters. It is the same as the number of
output image channels.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_height, image_width). None if use
filter_size, padding, and stride to calculate output_size.
If output_size and filter_size are specified at the same time, they
should follow the formula above. Default: None. output_size and filter_size
should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_height, filter_size_width).
Otherwise, filter_size_height = filter_size_width = filter_size. None if
use output size to calculate filter_size. Default: None. filter_size and
output_size should not be None at the same time.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding(str|int|list|tuple, optional): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If `padding` is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups = 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Tensor representing the conv2d_transpose, whose
data type is the same as the input's and whose shape is (num_batches, channels, out_h,
out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor
stores the transposed convolution result; otherwise, it stores the transposed
convolution result followed by the non-linear activation.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
print(conv2d_transpose.shape) # [-1, 2, 34, 34]
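# A hedged sketch: let the op derive the kernel size from a target output
# size instead of passing filter_size (shapes are illustrative).
up = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, output_size=[64, 64], stride=2)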
"""
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
+ data_format + " but only NCHW or NHWC supported.")
input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
op_type = 'conv2d_transpose'
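# Dispatch to the depthwise kernel only when every input channel forms its own
# group (groups == channels == num_filters) and cuDNN is not requested.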
if (input_channel == groups and num_filters == input_channel and
not use_cudnn):
op_type = 'depthwise_conv2d_transpose'
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
else:
padding = utils.convert_to_list(padding, 2, 'padding')
padding = [padding[0], padding[0], padding[1], padding[1]]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size]
h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
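# Invert the transposed-convolution output-size formula to recover the
# smallest kernel size that can produce the requested output_size.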
filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size = [filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 2,
'conv2d_transpose.filter_size')
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
out = helper.append_activation(pre_act)
return out
def conv3d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCDHW'):
r"""
:api_attr: Static Graph
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
three elements. These three elements represent depth, height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a Tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
Note:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shapes to the same output shape,
so for conv3d_transpose, when stride > 1, one input shape maps to multiple output shapes.
If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
else, the :math:`D_{out}` of the output size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must be
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Args:
input(Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
of input is float32 or float64.
num_filters(int): The number of filters. It is the same as the number of
output image channels.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. If output_size and filter_size are
specified at the same time, They should follow the formula above. Default: None.
Output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size. None if use output size to
calculate filter_size. Default: None. filter_size and output_size should not be
None at the same time.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
Default: stride = 1.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups=1
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Variable holding a Tensor representing the conv3d_transpose, whose data
type is the same as the input's and whose shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable stores the transposed convolution result; otherwise, it stores the
transposed convolution result followed by the non-linear activation.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
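# A hedged sketch: let the op derive the kernel size from a target output
# size instead of passing filter_size (values are illustrative).
res2 = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, output_size=[24, 64, 64], stride=2)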
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
"""
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
+ data_format + " but only NCDHW or NDHWC supported.")
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
-1]
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
else:
padding = utils.convert_to_list(padding, 3, 'padding')
padding = [
padding[0], padding[0], padding[1], padding[1], padding[2],
padding[2]
]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size, output_size]
d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]
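# As in conv2d_transpose, invert the output-size formula per spatial axis to
# recover the smallest kernel that can produce the requested output_size.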
filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
padding[5] - 1) // dilation[2] + 1
filter_size = [filter_size_d, filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 3,
'conv3d_transpose.filter_size')
if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
if data_format == 'NCDHW':
data_format = 'NCHW'
if data_format == 'NDHWC':
data_format = 'NHWC'
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=l_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
out = helper.append_activation(pre_act)
return out
def reduce_sum(input, dim=None, keep_dim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, the result of the summation over the specified dim of the input tensor;
its data type is the same as the input tensor's.
Raises:
TypeError: If the output data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_sum(x) # [3.5]
fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6]
fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
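# Fast path for dygraph mode: call the C++ op directly. reduce_all is set when
# dim is empty or covers every axis, so the op collapses all dimensions.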
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] or len(dim) == len(
input.shape) else False
dim = dim if dim != None and dim != [] else [0]
return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True
if dim == None or dim == [] or len(dim) == len(input.shape) else False
}
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
@deprecated(since="2.0.0", update_to="paddle.mean")
def reduce_mean(input, dim=None, keep_dim=False, name=None):
"""
Computes the mean of the input tensor's elements along the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the mean is computed. If
`None`, compute the mean over all elements of :attr:`input`
and return a variable with a single element, otherwise it
must be in the range :math:`[-rank(input), rank(input))`. If
:math:`dim[i] < 0`, the dimension to reduce is
:math:`rank(input) + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, the result of the mean over the specified dim of the input tensor;
its data type is the same as the input tensor's.
Raises:
TypeError: If the output data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_mean(x) # [0.4375]
fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8]
fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4]
fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
"""
return paddle.mean(x=input, axis=dim, keepdim=keep_dim, name=name)
def reduce_max(input, dim=None, keep_dim=False, name=None):
"""
Computes the maximum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, the result of the maximum over the specified dim of the input tensor;
its data type is the same as the input tensor's.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_max(x) # [0.9]
fluid.layers.reduce_max(x, dim=0) # [0.2, 0.3, 0.6, 0.9]
fluid.layers.reduce_max(x, dim=-1) # [0.9, 0.7]
fluid.layers.reduce_max(x, dim=1, keep_dim=True) # [[0.9], [0.7]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
"""
helper = LayerHelper('reduce_max', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_max',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] or
len(dim) == len(input.shape) else False
})
return out
def reduce_min(input, dim=None, keep_dim=False, name=None):
"""
Computes the minimum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, the result of the minimum over the specified dim of the input tensor;
its data type is the same as the input tensor's.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_min(x) # [0.1]
fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7]
fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1]
fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0]
fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0]
"""
helper = LayerHelper('reduce_min', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_min',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] or
len(dim) == len(input.shape) else False
})
return out
def reduce_prod(input, dim=None, keep_dim=False, name=None):
"""
Computes the product of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (int|list|tuple, optional): The dimensions along which the product is performed. If
:attr:`None`, multiply all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, the result of the product over the specified dim of the input tensor;
its data type is the same as the input tensor's.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_prod(x) # [0.0002268]
fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63]
fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084]
fluid.layers.reduce_prod(x, dim=1,
keep_dim=True) # [[0.027], [0.0084]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
"""
helper = LayerHelper('reduce_prod', **locals())
if dim is not None and not isinstance(dim, list):
if isinstance(dim, tuple):
dim = list(dim)
elif isinstance(dim, int):
dim = [dim]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".
format(type(dim)))
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_prod',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] or
len(dim) == len(input.shape) else False
})
return out
def reduce_all(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Tensor): the input tensor, it's data type should be `bool`.
dim (list|int, optional): The dimension along which the logical and is computed.
If :attr:`None`, compute the logical and over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically. The default value is None.
Returns:
Tensor: The reduced tensor variable with ``logical and`` over the given dims; the output data type is bool.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [True, True]]
x = fluid.layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
x = fluid.layers.cast(x, 'bool')
out = fluid.layers.reduce_all(x) # False
out = fluid.layers.reduce_all(x, dim=0) # [True, False]
out = fluid.layers.reduce_all(x, dim=-1) # [False, True]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = fluid.layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
helper = LayerHelper('reduce_all', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_all',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] or
len(dim) == len(input.shape) else False
})
return out
def reduce_any(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Tensor): the input tensor, it's data type should be `bool`.
dim (list|int, optional): The dimension along which the logical or is computed.
If :attr:`None`, compute the logical or over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The reduced tensor variable with ``logical or`` over the given dims; the output data type is bool.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [False, False]]
x = fluid.layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
x = fluid.layers.cast(x, 'bool')
out = fluid.layers.reduce_any(x) # True
out = fluid.layers.reduce_any(x, dim=0) # [True, False]
out = fluid.layers.reduce_any(x, dim=-1) # [True, False]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = fluid.layers.reduce_any(x, dim=1,
keep_dim=True) # [[True], [False]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
check_variable_and_dtype(input, 'input', ('bool'), 'reduce_any')
helper = LayerHelper('reduce_any', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_any',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] or
len(dim) == len(input.shape) else False
})
return out
def split(input, num_or_sections, dim=-1, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
input (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If ``num_or_sections`` is int, then the ``num_or_sections``
indicates the number of equal sized sub-Tensors that the ``input``
will be divided into. If ``num_or_sections`` is a list or tuple, its length
indicates the number of sub-Tensors, and its elements indicate the sizes of the sub-Tensors
along the split dimension, in order. The length of the list must not be larger than the size of ``input`` along the specified dim.
dim (int|Tensor, optional): The dimension along which to split, it can be a scalar with type ``int`` or
a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``. If :math:`dim < 0`,
the dimension to split along is :math:`rank(input) + dim`. Default is -1.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor): The list of segmented Tensors.
Example:
.. code-block:: python
import paddle.fluid as fluid
# input is a Tensor which shape is [3, 9, 5]
input = fluid.data(
name="input", shape=[3, 9, 5], dtype="float32")
out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
# out0.shape [3, 2, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 4, 5]
out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
# out0.shape [3, 2, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 4, 5]
# dim is negative; the real dim is (rank(input) + dim), which here
# equals 1.
out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
"""
if in_dygraph_mode():
num = None
attrs = ()
if isinstance(dim, Variable):
dim = dim.numpy()
dim = dim.item(0)
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs += ('axis', dim)
if isinstance(num_or_sections, int):
num = num_or_sections
attrs += ('num', num_or_sections)
elif isinstance(num_or_sections, (list, tuple)):
num = len(num_or_sections)
if utils._contain_var(num_or_sections):
for index, item in enumerate(num_or_sections):
if isinstance(item, Variable):
num_or_sections[index] = num_or_sections[index].numpy()[
0]
attrs += ('sections', list(num_or_sections))
else:
attrs += ('sections', list(num_or_sections))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
"received %s." % (type(num_or_sections)))
return core.ops.split(input, num, *attrs)
check_variable_and_dtype(
input, 'input',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split')
check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
check_type(dim, 'dim', (int, Variable), 'split')
if isinstance(dim, Variable):
check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
def _get_SectionsTensorList(one_list):
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert (isinstance(dim_size, int))
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1." %
idx)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
            assert input_shape[dim] % num_or_sections == 0, \
                "The input's size along the split dimension " \
                "must be evenly divisible by Attr(num_or_sections). " \
                "But %d is not evenly divisible by %d. " % (num_or_sections, input_shape[dim])
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert len(num_or_sections) <= input_shape[
dim], 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections))
if utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections)
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
return outs
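
# The section handling above (including the single -1 wildcard in `num_or_sections`)
# can be hard to follow from the op attributes alone. The helper below is a minimal
# pure-Python sketch of the same resolution rule, added for illustration only; the
# name `_resolve_split_sections_example` is hypothetical and not part of the Paddle API.
def _resolve_split_sections_example(dim_size, num_or_sections):
    """Resolve `num_or_sections` into concrete section sizes for one dimension.

    Mirrors the documented behaviour: an int means equal-sized splits, a
    list/tuple may contain at most one -1 which is inferred from the rest.
    """
    if isinstance(num_or_sections, int):
        assert dim_size % num_or_sections == 0, (
            "dim_size must be evenly divisible by num_or_sections")
        return [dim_size // num_or_sections] * num_or_sections
    sections = list(num_or_sections)
    unknown = [i for i, s in enumerate(sections) if s == -1]
    assert len(unknown) <= 1, "only one -1 is allowed in num_or_sections"
    if unknown:
        sections[unknown[0]] = dim_size - sum(s for s in sections if s != -1)
    assert sum(sections) == dim_size, "sections must sum to dim_size"
    return sections
# Example (illustrative): _resolve_split_sections_example(9, [2, 3, -1]) -> [2, 3, 4],
# matching example 3 in the docstring above.
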
def l2_normalize(x, axis, epsilon=1e-12, name=None):
r"""
This op normalizes `x` along dimension `axis` using an L2
norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
.. math::
        y = \\frac{x}{ \sqrt{\sum {x^2} + epsilon }}
For `x` with more dimensions, this layer independently normalizes each 1-D
slice along dimension `axis`.
Args:
x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
axis(int): The axis on which to apply normalization. If `axis < 0`, \
            the dimension to normalize is rank(X) + axis. -1 is the
last dimension.
epsilon(float): The epsilon value is used to avoid division by zero, \
the default value is 1e-12.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output has the same shape and data type with `x`.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[2,3])
output = fluid.layers.l2_normalize(x=input,axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3).astype("float32")
print(input_data)
# [[0.5171216 0.12704141 0.56018186]
# [0.93251234 0.5382788 0.81709313]]
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([[0.48496857, 0.22970329, 0.56545246],
# [0.8745316 , 0.9732607 , 0.82478094]], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.l2_normalize(x=input, axis=-1)
print(output.numpy())
# [[0.66907585 0.16437206 0.7247892 ]
# [0.6899054 0.3982376 0.6045142 ]]
"""
if len(x.shape) == 1:
axis = 0
check_variable_and_dtype(x, "X", ("float32", "float64"), "norm")
helper = LayerHelper("l2_normalize", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
norm = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="norm",
inputs={"X": x},
outputs={"Out": out,
"Norm": norm},
attrs={
"axis": 1 if axis is None else axis,
"epsilon": epsilon,
})
return out
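
# For clarity, the normalization formula above can be reproduced with plain NumPy.
# This reference helper is an illustrative sketch only; the name
# `_l2_normalize_reference` is hypothetical and not part of the Paddle API.
def _l2_normalize_reference(x, axis, epsilon=1e-12):
    import numpy as np
    x = np.asarray(x, dtype='float64')
    # Each 1-D slice along `axis` is divided by sqrt(sum(x^2) + epsilon).
    norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True) + epsilon)
    return x / norm
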
@deprecated(since="2.0.0", update_to="paddle.matmul")
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
"""
Applies matrix multiplication to two tensors.
    Currently, the input tensors' rank can be any, but when the rank of any
    input is bigger than 3, the two inputs' ranks should be equal.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
:math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
:math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
:math:`[1, D]` in transposed form.
- After transpose, the two tensors are 2-D or n-D and matrix multiplication
performs in the following way.
- If both are 2-D, they are multiplied like conventional matrices.
- If either is n-D, it is treated as a stack of matrices residing in the
last two dimensions and a batched matrix multiply supporting broadcast
applies on the two tensors.
Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# fluid.layers.matmul(x, y) # out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [B, M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [M, N]
# x: [B, M, K], y: [K]
# fluid.layers.matmul(x, y) # out: [B, M]
# x: [K], y: [K]
# fluid.layers.matmul(x, y) # out: [1]
# x: [M], y: [N]
# fluid.layers.matmul(x, y, True, True) # out: [M, N]
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
"""
attrs = {
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
}
if in_dygraph_mode():
out = _varbase_creator(dtype=x.dtype)
core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
transpose_y, 'alpha', float(alpha))
return out
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(
val, name, ['float16', 'float32', 'float64'], 'matmul')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if transpose_x:
x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
if transpose_y:
y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
if x_shape[-1] != y_shape[-2]:
assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
"After performing an optional transpose, Input X's width should be " \
"equal to Y's width for multiplication " \
"prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
(x_shape, y_shape)
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(x, y)
helper = LayerHelper('matmul', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def topk(input, k, name=None):
"""
:alias_main: paddle.topk
:alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk
:old_api: paddle.fluid.layers.topk
This OP is used to find values and indices of the k largest entries
for the last dimension.
If the input is a 1-D Tensor, finds the k largest entries and outputs
their values and indices.
If the input is a Tensor with higher rank, this operator computes the top k
entries along the last dimension.
.. code-block:: text
Case 1:
Input:
input.shape = [3, 4]
input.data = [[5, 4, 2, 3],
[9, 7, 10, 25],
[6, 2, 10, 1]]
k = 2
Output:
The first output:
values.shape = [3, 2]
values.data = [[5, 4],
[10, 25],
[6, 10]]
The second output:
indices.shape = [3, 2]
indices.data = [[0, 1],
[2, 3],
[0, 2]]
Args:
input(Variable): The input tensor. Support data types: float32, float64.
k(int | Variable): The number of top elements to look for along the last dimension
of input tensor.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
        Indices (Variable): Indices of k largest elements along the last dimension of input. The dimension is same as values.
Raises:
ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape[None, 13, 5], top5_indices.shape=[None, 13, 5]
# 1D Tensor
input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]
# k=Variable
input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]
"""
if in_dygraph_mode():
_k = k.numpy().item(0) if isinstance(k, Variable) else k
out, indices = core.ops.top_k(input, 'k', _k)
out.stop_gradient = True
indices.stop_gradient = True
return out, indices
inputs = {"X": [input]}
attrs = {}
if isinstance(k, Variable):
inputs['K'] = [k]
else:
attrs = {'k': k}
helper = LayerHelper("top_k", **locals())
values = helper.create_variable_for_type_inference(dtype=input.dtype)
indices = helper.create_variable_for_type_inference(dtype="int64")
helper.append_op(
type="top_k",
inputs=inputs,
outputs={"Out": [values],
"Indices": [indices]},
attrs=attrs)
values.stop_gradient = True
indices.stop_gradient = True
return values, indices
def ctc_greedy_decoder(input,
blank,
input_length=None,
padding_value=0,
name=None):
r"""
This op is used to decode sequences by greedy policy by the following steps:
    1. Get the indexes of the maximum value for each row in the input, i.e.
       numpy.argmax(input, axis=0).
2. For each sequence in result of step1, merge repeated tokens between two
blanks and delete all blanks.
This op is implemented in two modes: lod and padding, either of them can be used.
The input can be either LoDTensor or Tensor, corresponding to lod and padding
mode respectively.
A simple example as below:
.. code-block:: text
Given:
(1) for lod mode:
input.data = [[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1],
[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]
input.lod = [[4, 4]]
Computation:
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]]
step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
[[2], [1]]
Finally:
output.data = [[2],
[1],
[3]]
output.lod = [[2, 1]]
(2) for padding mode:
input.data = [[[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1]],
[[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]]
input_length.data = [[4], [4]]
input.shape = [2, 4, 4]
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
step2: Change the argmax result to use padding mode, then argmax result is
[[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
step3: Apply ctc_align to padding argmax result, padding_value is 0
Finally:
output.data = [[2, 1, 0, 0],
[3, 0, 0, 0]]
output_length.data = [[2], [1]]
Parameters:
input(Variable): the probabilities of variable-length sequences. When in lod mode,
            it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1]
where Lp is the sum of all input sequences' length and
num_classes is the true number of classes. When in padding mode,
            it is a 3-D Tensor with padding; its shape is [batch_size, N, num_classes + 1].
(not including the blank label). The data type can be float32 or float64.
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in the half-opened
interval [0, num_classes + 1).
input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
It is used for padding mode. In lod mode, input_length is None.
padding_value(int): padding value.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
in result were empty, the result LoDTensor will be [-1] with empty \
LoD [[]].
For padding mode, returns a tuple of (output, output_length), which was described as below:
output, 2-D Tensor, shape is [batch_size, N], data type is int64.
output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
each sequence of output for padding mode.
Return type:
For lod mode: Variable
For padding mode: tuple of two Variables (output, output_length).
Examples:
.. code-block:: python
# for lod mode
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
# for padding mode
x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
input_length=x_pad_len)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'ctc_greedy_decoder')
helper = LayerHelper("ctc_greedy_decoder", **locals())
_, topk_indices = topk(input, k=1)
# ctc align op
ctc_out = helper.create_variable_for_type_inference(dtype="int64")
if input_length is None:
helper.append_op(
type="ctc_align",
inputs={"Input": [topk_indices]},
outputs={"Output": [ctc_out]},
attrs={"merge_repeated": True,
"blank": blank})
return ctc_out
else:
ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
ctc_input = squeeze(topk_indices, [2])
helper.append_op(
type="ctc_align",
inputs={"Input": [ctc_input],
"InputLength": [input_length]},
outputs={"Output": [ctc_out],
"OutputLength": [ctc_out_len]},
attrs={
"merge_repeated": True,
"blank": blank,
"padding_value": padding_value
})
return ctc_out, ctc_out_len
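
# The two decoding steps described in the docstring above (arg-max per time step,
# then merging repeated tokens and dropping blanks) can be written down in a few
# lines of plain Python. The helper below is an illustrative reference only; the
# name `_ctc_greedy_decode_reference` is hypothetical and not part of the Paddle API.
def _ctc_greedy_decode_reference(probs, blank):
    """Greedy-decode one sequence given per-step class probabilities.

    `probs` is a list of per-class probability rows (one row per time step).
    """
    # Step 1: pick the most likely class at every time step.
    best_path = [max(range(len(row)), key=row.__getitem__) for row in probs]
    # Step 2: collapse repeated tokens, then remove the blank label.
    decoded = []
    previous = None
    for token in best_path:
        if token != previous and token != blank:
            decoded.append(token)
        previous = token
    return decoded
# Example (illustrative): decoding [[0.6, 0.1, 0.3, 0.1], [0.3, 0.2, 0.4, 0.1],
# [0.1, 0.5, 0.1, 0.3], [0.5, 0.1, 0.3, 0.1]] with blank=0 yields [2, 1], matching
# the first sequence in the lod-mode example above.
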
def transpose(x, perm, name=None):
"""
Permute the data dimensions of `input` according to `perm`.
The `i`-th dimension of the returned tensor will correspond to the
perm[i]-th dimension of `input`.
Args:
x (Tensor): The input Tensor. It is a N-D Tensor of data types float32, float64, int32.
perm (list|tuple): Permute the input according to the data of perm.
name (str): The name of this layer. It is optional.
Returns:
Tensor: A transposed n-D Tensor, with data type being float32, float64, int32, int64.
For Example:
.. code-block:: text
x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]]
[[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
shape(x) = [2,3,4]
# Example 1
perm0 = [1,0,2]
y_perm0 = [[[ 1 2 3 4] [13 14 15 16]]
[[ 5 6 7 8] [17 18 19 20]]
[[ 9 10 11 12] [21 22 23 24]]]
shape(y_perm0) = [3,2,4]
# Example 2
perm1 = [2,1,0]
y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
[[ 2 14] [ 6 18] [10 22]]
[[ 3 15] [ 7 19] [11 23]]
[[ 4 16] [ 8 20] [12 24]]]
shape(y_perm1) = [4,3,2]
Examples:
.. code-block:: python
import paddle
x = paddle.randn([2, 3, 4])
x_transposed = paddle.transpose(x, perm=[1, 0, 2])
print(x_transposed.shape)
# [3L, 2L, 4L]
"""
if in_dygraph_mode():
out, _ = core.ops.transpose2(x, 'axis', perm)
return out
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
check_type(perm, 'perm', (list, tuple), 'transpose')
if isinstance(perm, tuple):
perm = list(perm)
if len(perm) != len(x.shape):
raise ValueError(
"Input(perm) is the permutation of dimensions of Input(x), "
"its length should be equal to dimensions of Input(x), "
"but received dimension of Input(x) is %s, "
"the length of Input(perm) is %s." % (len(x.shape), len(perm)))
for idx, dim in enumerate(perm):
if dim >= len(x.shape):
raise ValueError(
"Each element in Input(perm) should be less than Input(x)'s dimension, "
"but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
"dimension %d." % (idx, perm[idx], len(x.shape)))
helper = LayerHelper('transpose', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out],
'XShape': [x_shape]},
attrs={'axis': perm})
return out
def im2sequence(input,
filter_size=1,
stride=1,
padding=0,
input_image_size=None,
out_stride=1,
name=None):
r"""
:api_attr: Static Graph
Extracts image patches from the input tensor to form a tensor of shape
{input.batch_size * output_height * output_width, filter_size_height *
    filter_size_width * input.channels}. This op uses a filter to scan images
    and converts these images to sequences. After expanding, the number of time steps is
    output_height * output_width for an image, in which output_height and
output_width are calculated by below equation:
.. math::
output\_height = 1 + \
(padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
output\_width = 1 + \
(padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width
And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
Parameters:
input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
stride(int32 | List[int32]): The stride size. If stride is a List, it must
contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
padding(int32 | List[int32]): The padding size. If padding is a List, it can
contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
paddings of four direction. Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
padding_up = padding_down = padding_height and
padding_left = padding_right = padding_width. Otherwise, a scalar padding means
padding_up = padding_down = padding_left = padding_right = padding.
Default is 0.
        input_image_size(Variable, optional): the input contains the real size of the images. Its shape
            is :math:`[batchsize, 2]` . It is just for batch inference when not None. Default is None.
out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
If out_stride is List, it must contain two integers,
            :math:`[out\_stride\_height, out\_stride\_width]` . Otherwise,
the out_stride_height = out_stride_width = out_stride. Default is 1.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.
Return Type: Variable
Examples:
.. code-block:: text
Given:
x = [[[[ 6. 2. 1.]
[ 8. 3. 5.]
[ 0. 2. 6.]]
[[ 2. 4. 4.]
[ 6. 3. 0.]
[ 6. 4. 7.]]]
[[[ 6. 7. 1.]
[ 5. 7. 9.]
[ 2. 4. 8.]]
[[ 1. 2. 1.]
[ 1. 3. 5.]
[ 9. 0. 8.]]]]
x.dims = {2, 2, 3, 3}
And:
filter = [2, 2]
stride = [1, 1]
padding = [0, 0]
Then:
output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.]
[ 2. 1. 3. 5. 4. 4. 3. 0.]
[ 8. 3. 0. 2. 6. 3. 6. 4.]
[ 3. 5. 2. 6. 3. 0. 4. 7.]
[ 6. 7. 5. 7. 1. 2. 1. 3.]
[ 7. 1. 7. 9. 2. 1. 3. 5.]
[ 5. 7. 2. 4. 1. 3. 9. 0.]
[ 7. 9. 4. 8. 3. 5. 0. 8.]]
output.dims = {8, 8}
output.lod = [[4, 4]]
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32],
dtype='float32')
output = fluid.layers.im2sequence(
input=data, stride=[1, 1], filter_size=[2, 2])
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')
if isinstance(filter_size, int):
filter_size = [filter_size, filter_size]
if isinstance(stride, int):
stride = [stride, stride]
if isinstance(padding, int):
padding = [padding, padding]
if len(padding) == 2:
padding.append(padding[0])
padding.append(padding[1])
inputs = {"X": input}
attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
if input_image_size:
if isinstance(out_stride, int):
out_stride = [out_stride, out_stride]
inputs["Y"] = input_image_size
attrs["out_stride"] = out_stride
helper = LayerHelper('im2sequence', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
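
# The output_height / output_width formulas in the docstring above are easy to get
# wrong by hand, so here is a tiny reference calculation. Illustrative sketch only;
# `_im2sequence_output_size_example` is a hypothetical name, not part of the Paddle API.
def _im2sequence_output_size_example(input_size, filter_size, stride,
                                     padding_before, padding_after):
    """Compute one spatial output dimension using the documented formula."""
    return 1 + (padding_before + padding_after + input_size - filter_size +
                stride - 1) // stride
# Example (illustrative): a 3x3 image with a 2x2 filter, stride 1 and no padding
# gives _im2sequence_output_size_example(3, 2, 1, 0, 0) == 2 patch rows/columns,
# i.e. 2 * 2 = 4 time steps per image, matching the text example above.
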
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
:api_attr: Static Graph
${comment}
Args:
input (${x_type}): ${x_comment}.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc.
act (str): Non-linear activation to be applied to output variable.
Returns:
${out_comment}.
Examples:
.. code-block:: python
# for LodTensor inputs
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[9, 16],
dtype='float32', lod_level=1)
out = paddle.static.nn.row_conv(input=x, future_context_size=2)
# for Tensor inputs
x = paddle.static.data(name='x', shape=[9, 4, 16], dtype='float32')
out = paddle.static.nn.row_conv(input=x, future_context_size=2)
"""
helper = LayerHelper('row_conv', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
dtype = helper.input_dtype()
filter_shape = [future_context_size + 1, input.shape[-1]]
filter_param = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [filter_param]},
outputs={'Out': [out]})
return helper.append_activation(out)
@templatedoc()
def multiplex(inputs, index, name=None):
"""
Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.
If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor, :math:`i` between :math:`[0,m)` .
And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .
For Example:
.. code-block:: text
Given:
inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
[[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
[[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
[[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]
index = [[3],[0],[1],[2]]
out = [[3,0,3,4], # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
[0,1,3,4], # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
[1,2,4,2], # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
[2,3,3,4]] # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]
Args:
inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
index (Tensor): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle
import numpy as np
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
res = paddle.multiplex(inputs, index)
print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
"""
if in_dygraph_mode():
return core.ops.multiplex(index, inputs)
helper = LayerHelper('multiplex', **locals())
check_type(inputs, 'inputs', (list), 'multiplex')
if len(inputs) < 2:
raise ValueError(
"inputs should be a list object with at least 2 elements.")
for id, x in enumerate(inputs):
check_variable_and_dtype(x, 'input[' + str(id) + ']',
['float32', 'float64', 'int32', 'int64'],
'multiplex')
check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op(
type='multiplex',
inputs={'X': inputs,
'Ids': index},
outputs={'Out': [out]})
return out
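
# The row-selection rule O[i] = I_{index[i]}[i] described above is compact but easy
# to misread; the helper below restates it in plain Python. Illustrative sketch
# only; `_multiplex_reference` is a hypothetical name, not part of the Paddle API.
def _multiplex_reference(inputs, index):
    """Select, for every row i, the i-th row of the input chosen by index[i]."""
    return [inputs[which[0]][i] for i, which in enumerate(index)]
# Example (illustrative): with two 2x2 inputs and index [[1], [0]], result row 0
# comes from inputs[1] and row 1 from inputs[0], matching the docstring example.
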
def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
"""
This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
For each instance, it computes the smooth L1 loss element by element first
and then sums all the losses. So the shape of output Variable is
[batch_size, 1].
Args:
x (Variable): A tensor with rank at least 2. The input value of smooth
L1 loss op with shape [batch_size, dim1, ..., dimN].
A LoDTensor or Tensor with type float32.
y (Variable): A tensor with rank at least 2. The target value of smooth
L1 loss op with same shape as :attr:`x`.
A LoDTensor or Tensor with type float32.
inside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
by this tensor element by element.
A Tensor with type float32.
outside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the out smooth L1 loss will be multiplied by this tensor
element by element.
A Tensor with type float32.
sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
scalar with default value 1.0.
Returns:
Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
result = fluid.layers.smooth_l1(data,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,3).astype("float32")
y = np.random.rand(3,3).astype("float32")
output= exe.run(feed={"x":x, "y":y},
fetch_list=[result])
print(output)
#[array([[0.08220536],
# [0.36652038],
# [0.20541131]], dtype=float32)]
"""
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss')
helper = LayerHelper('smooth_l1_loss', **locals())
diff = helper.create_variable_for_type_inference(dtype=x.dtype)
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='smooth_l1_loss',
inputs={
'X': x,
'Y': y,
'InsideWeight': inside_weight,
'OutsideWeight': outside_weight
},
outputs={'Diff': diff,
'Out': loss},
attrs={'sigma': sigma if sigma is not None else 1.0})
return loss
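
# The docstring above describes the reduction (element-wise loss, then a sum per
# instance) but not the element-wise formula itself. The sketch below uses the
# commonly used smooth-L1 definition (as in Fast R-CNN) and is an illustrative
# assumption, not the authoritative kernel; `_smooth_l1_reference` is a
# hypothetical name, not part of the Paddle API.
def _smooth_l1_reference(x, y, sigma=1.0):
    import numpy as np
    x = np.asarray(x, dtype='float64')
    y = np.asarray(y, dtype='float64')
    diff = np.abs(x - y)
    sigma2 = sigma * sigma
    # Quadratic near zero, linear for large differences (assumed definition).
    elementwise = np.where(diff < 1.0 / sigma2,
                           0.5 * sigma2 * diff * diff,
                           diff - 0.5 / sigma2)
    # Sum everything except the batch dimension -> shape [batch_size, 1].
    return elementwise.reshape(elementwise.shape[0], -1).sum(axis=1, keepdims=True)
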
@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot')
def one_hot(input, depth, allow_out_of_range=False):
"""
    **WARNING:** This OP requires the last dimension of the Tensor's shape to be equal to 1.
This OP will be deprecated in a future release. It is recommended to use fluid. :ref:`api_fluid_one_hot` .
The operator converts each id in the input to an one-hot vector with a
:attr:`depth` length. The value in the vector dimension corresponding to the id
is 1, and the value in the remaining dimension is 0.
The shape of output Tensor or LoDTensor is generated by adding :attr:`depth` dimension
behind the last dimension of the input shape.
.. code-block:: text
Example 1 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [3], [0]]
depth = 4
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]]
Example 2 (allow_out_of_range=True):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = True
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
                            [0., 0., 0., 0.], # This id is 5, which exceeds depth, so its row is set to all zeros.
[1., 0., 0., 0.]]
Example 3 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = False
output: Throw an exception for Illegal value
The second dimension in X is 5, which is greater than depth.
            allow_out_of_range=False means that the word id is not allowed to exceed depth,
            so it throws an exception.
Args:
input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` ,
which contains at least one dimension and the last dimension must be 1.
The data type is int32 or int64.
depth(scalar): An integer defining the :attr:`depth` of the one hot dimension. If input
is word id, depth is generally the dictionary size.
allow_out_of_range(bool): A bool value indicating whether the input
indices could be out of range :math:`[0, depth)` . When input indices are
            out of range, an :code:`Illegal value` exception is raised if :attr:`allow_out_of_range`
            is False, or zero-filled representations are created if it is set to True.
Default: False.
Returns:
Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4].
label = fluid.data(name="label", shape=[4, 1], dtype="int64")
one_hot_label = fluid.layers.one_hot(input=label, depth=4)
"""
if in_dygraph_mode():
if isinstance(depth, Variable):
depth = depth.numpy()
assert depth.shape == (
1, ), "depth of type Variable should have shape [1]"
depth = depth.item(0)
out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
allow_out_of_range)
out.stop_gradient = True
return out
helper = LayerHelper("one_hot", **locals())
check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(depth, Variable):
# user attribute
inputs = {'X': input}
attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
else:
depth.stop_gradient = True
inputs = {'X': input, 'depth_tensor': depth}
attrs = {'allow_out_of_range': allow_out_of_range}
helper.append_op(
type="one_hot",
inputs=inputs,
attrs=attrs,
outputs={'Out': one_hot_out})
one_hot_out.stop_gradient = True
return one_hot_out
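
# The allow_out_of_range behaviour above (all-zero rows instead of an error) is the
# part most often misunderstood, so here is a plain NumPy restatement. Illustrative
# sketch only; `_one_hot_reference` is a hypothetical name, not part of the Paddle API.
def _one_hot_reference(ids, depth, allow_out_of_range=False):
    import numpy as np
    flat_ids = np.asarray(ids).reshape(-1)
    out = np.zeros((flat_ids.size, depth), dtype='float32')
    for row, idx in enumerate(flat_ids):
        if 0 <= idx < depth:
            out[row, idx] = 1.0
        elif not allow_out_of_range:
            raise ValueError("Illegal value: id %d is outside [0, %d)" % (idx, depth))
        # Otherwise the row is left as all zeros, as in Example 2 above.
    return out
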
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
"""
:api_attr: Static Graph
    Create an auto-increased variable, which will be automatically increased
    by 1 in every iteration. By default, the first return of this counter is 1,
and the step size is 1.
Args:
counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
begin(int, optional): The first return value of this counter. Default 1.
step(int, optional): The step size. Default 1.
Returns:
Variable: The auto-increased Variable with data type int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
global_step = fluid.layers.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
"""
helper = LayerHelper('global_step_counter')
if counter_name is None:
counter_name = '@STEP_COUNTER@'
counter, is_new_var = helper.create_or_get_global_variable(
name=counter_name,
dtype='int64',
shape=[1],
persistable=True,
belong_to_optimizer=True)
if is_new_var:
helper.set_variable_initializer(
counter, initializer=Constant(
value=begin - 1, force_cpu=True))
helper.main_program.global_block()._prepend_op(
type='increment',
inputs={'X': [counter]},
outputs={'Out': [counter]},
attrs={'step': float(step)})
counter.stop_gradient = True
return counter
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
r"""
:alias_main: paddle.reshape
:alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape
This operator changes the shape of ``x`` without changing its data.
The target shape can be given by ``shape`` or ``actual_shape``.
When ``shape`` and ``actual_shape`` are set at the same time,
``actual_shape`` has a higher priority than ``shape``
but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
guarantee shape inference in compile-time.
Some tricks exist when specifying the target shape.
1. -1 means the value of this dimension is inferred from the total element
number of x and remaining dimensions. Thus one and only one dimension can
be set -1.
2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed
the dimension of x.
Here are some examples to explain it.
1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [6, 8], the reshape operator will transform x into a 2-D tensor with
shape [6, 8] and leaving x's data unchanged.
2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
specified is [2, 3, -1, 2], the reshape operator will transform x into a
4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
case, one dimension of the target shape is set to -1, the value of this
dimension is inferred from the total element number of x and remaining
dimensions.
3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x.
**Note**:
The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
Args:
x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor .
actual_shape(variable, optional): An 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
according to this given shape rather than ``shape`` specifying shape.
That is to say ``actual_shape`` has a higher priority
than ``shape(list|tuple)`` but not ``shape(Tensor)``. \
This argument ``actual_shape`` will be removed in a future version. \
Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
are the same variable. Otherwise, the input and output of
``layers.reshape`` are different variable. Default False. Note that if ``x``
is more than one OPs' input, ``inplace`` must be False.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A reshaped Tensor with the same data type as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
# example 1:
# attr shape is a list which doesn't contain Tensors.
data_1 = fluid.data(
name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape(
x=data_1, shape=[-1, 0, 3, 2])
# the shape of reshaped_1 is [2,4,3,2].
# example 2:
# attr shape is a list which contains Tensors.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
# example 3:
data_3 = fluid.data(
name="data_3", shape=[2,4,6], dtype='float32')
reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
# the shape of reshaped_3 is [6,8].
"""
if in_dygraph_mode():
#TODO(zhiqiu): enable inplace in dygraph mode.
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
if isinstance(shape, (list, tuple)):
shape = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in shape
]
out, _ = core.ops.reshape2(x, None, 'shape', shape)
elif isinstance(shape, Variable):
shape.stop_gradient = True
out, _ = core.ops.reshape2(x, shape)
return dygraph_utils._append_activation_in_dygraph(out, act)
check_variable_and_dtype(x, 'x', [
'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'uint16'
], 'reshape')
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
helper = LayerHelper("reshape2", **locals())
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one dimension value of 'shape' in reshape can "
"be -1. But received shape[%d] is also -1." % dim_idx)
unk_dim_idx = dim_idx
elif dim_size == 0:
assert dim_idx < len(x.shape), (
"The index of 0 in `shape` must be less than "
"the input tensor X's dimensions. "
"But received shape[%d] = 0, X's dimensions = %d." %
(dim_idx, len(x.shape)))
else:
assert dim_size > 0, (
"Each dimension value of 'shape' in reshape must not "
"be negative except one unknown dimension. "
"But received shape[%d] = %s." %
(dim_idx, str(dim_size)))
return attrs_shape
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = x if inplace else helper.create_variable_for_type_inference(
dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return helper.append_activation(out)
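
# The two "tricks" above (-1 for an inferred dimension, 0 for "copy from the input")
# are restated below as a small pure-Python helper. Illustrative sketch only;
# `_resolve_reshape_shape_example` is a hypothetical name, not part of the Paddle API.
def _resolve_reshape_shape_example(input_shape, target_shape):
    """Resolve 0 and -1 entries of `target_shape` against `input_shape`."""
    resolved = []
    for idx, size in enumerate(target_shape):
        if size == 0:
            # 0 copies the size of the corresponding input dimension.
            resolved.append(input_shape[idx])
        else:
            resolved.append(size)
    if -1 in resolved:
        total = 1
        for size in input_shape:
            total *= size
        known = 1
        for size in resolved:
            if size != -1:
                known *= size
        resolved[resolved.index(-1)] = total // known
    return resolved
# Example (illustrative): _resolve_reshape_shape_example([2, 4, 6], [-1, 0, 3, 2])
# returns [2, 4, 3, 2], matching example 3 in the docstring above.
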
def squeeze(input, axes, name=None):
"""
    This OP will squeeze single-dimensional entries of the input tensor's shape. If axes is provided,
    the dims specified by axes will be removed; the dims selected by axes should have size one. If axes is not
    provided, all dims equal to one will be deleted.
.. code-block:: text
Case1:
Input:
X.shape = (1, 3, 1, 5)
axes = [0]
Output:
Out.shape = (3, 1, 5)
Case2:
Input:
X.shape = (1, 3, 1, 5)
axes = []
Output:
Out.shape = (3, 5)
Case3:
Input:
X.shape = [1,3,1,5]
axes = [-2]
Output:
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: Output squeezed Tensor. Data type is same as input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x = fluid.data(name='x', shape=[None, 5, 1, 10])
y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
"""
if in_dygraph_mode():
out, _ = core.ops.squeeze2(input, 'axes', axes)
return out
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(
input, 'input',
['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
'squeeze')
check_type(axes, 'axis/axes', (list, tuple), 'squeeze')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs={"axes": axes},
outputs={"Out": out,
"XShape": x_shape})
return out
def unsqueeze(input, axes, name=None):
"""
Insert single-dimensional entries to the shape of a Tensor. Takes one
required argument axes, a list of dimensions that will be inserted.
Dimension indices in axes are as seen in the output tensor.
For example:
.. code-block:: text
Given a tensor such that tensor with shape [3, 4, 5],
then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
Args:
input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor .
name (str|None): Name for this layer.
Returns:
Variable: Unsqueezed Tensor, with the same data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[5, 10])
y = fluid.layers.unsqueeze(input=x, axes=[1])
"""
if in_dygraph_mode():
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, Variable):
axes = axes.numpy().tolist()
elif isinstance(axes, (list, tuple)):
axes = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in axes
]
out, _ = core.ops.unsqueeze2(input, 'axes', axes)
return out
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(
input, 'input',
['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
'unsqueeze')
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return out
def lod_reset(x, y=None, target_lod=None):
"""
Set LoD of :attr:`x` to a new one specified by :attr:`y` or
:attr:`target_lod`. When :attr:`y` provided, :attr:`y.lod` would be
considered as target LoD first, otherwise :attr:`y.data` would be
considered as target LoD. If :attr:`y` is not provided, target LoD should
be specified by :attr:`target_lod`. If target LoD is specified by
:attr:`y.data` or :attr:`target_lod`, only one level LoD is supported.
.. code-block:: text
* Example 1:
Given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
target_lod: [4, 2]
then we get a 1-level LoDTensor:
out.lod = [[4, 2]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 2:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a Tensor:
y.data = [[2, 4]]
y.dims = [1, 3]
then we get a 1-level LoDTensor:
out.lod = [[2, 4]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 3:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a 2-level LoDTensor:
y.lod = [[2, 2], [2, 2, 1, 1]]
y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
y.dims = [6, 1]
then we get a 2-level LoDTensor:
out.lod = [[2, 2], [2, 2, 1, 1]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
Args:
x (Variable): Input variable which could be a Tensor or LoDTensor.
The data type should be int32, int64, float32 or float64.
y (Variable, optional): If provided, output's LoD would be derived from :attr:`y`.
If y's lod level>0, the data type can be any type.
If y's lod level=0, the data type should be int32.
target_lod (list|tuple, optional): One level LoD which should be considered
as target LoD when :attr:`y` not provided.
Returns:
Variable: Output variable with LoD specified by this layer.
Raises:
ValueError: If :attr:`y` and :attr:`target_lod` are both None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
out = fluid.layers.lod_reset(x=x, y=y)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'lod_reset')
helper = LayerHelper("lod_reset", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if y is not None:
check_type(y, 'y', (Variable), 'lod_reset')
#TODO: check y.lod_level = 0 dtype
helper.append_op(
type="lod_reset", inputs={'X': x,
'Y': y}, outputs={'Out': out})
elif target_lod is not None:
helper.append_op(
type="lod_reset",
inputs={'X': x},
attrs={'target_lod': target_lod},
outputs={'Out': out})
else:
raise ValueError("y and target_lod should not be both none.")
return out
def lod_append(x, level):
"""
Append level to LoD of :attr:`x`.
.. code-block:: text
* Example 1:
given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
            level: [1, 1, 1, 1, 1, 1]
then we get a 2-level LoDTensor:
x.lod = [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
Args:
x (Variable): Input variable which could be a tensor or LoDTensor.
The data type should be int32, int64, float32 or float64.
level (list|tuple|Variable, optional): The LoD level to be appended into LoD of x.
If level is variable and its lod level>0, the data type can be any type.
If level is variable and its lod level=0, the data type should be int32.
Returns:
Variable: Output variable with new LoD level.
Raises:
        ValueError: If :attr:`x` is None or :attr:`level` is not Iterable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1)
out = fluid.layers.lod_append(x, [1,1,1,1,1,1])
"""
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2 compatibility
        from collections import Iterable
if x is None:
raise ValueError("Input(x) can't be None.")
if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
raise ValueError("Input(level) must be list, tuple or Variable.")
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'lod_append')
helper = LayerHelper("lod_append", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {'X': x}
attrs = {'append': True}
if isinstance(level, Variable):
inputs['Y'] = level
#TODO: check y.lod_level = 0 dtype
else:
attrs['target_lod'] = level
helper.append_op(
type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
data_format='NCHW'):
r"""
:alias_main: paddle.nn.functional.lrn
:alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
:old_api: paddle.fluid.layers.lrn
This operator implements the Local Response Normalization Layer.
This layer performs a type of "lateral inhibition" by normalizing over local input regions.
For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_
The formula is as follows:
.. math::
Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta}
In the above equation:
- :math:`n` : The number of channels to sum over.
- :math:`k` : The offset (avoid being divided by 0).
- :math:`\\alpha` : The scaling parameter.
- :math:`\\beta` : The exponent parameter.
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or [N, H, W, C],
            where N is the batch size, C is the input channel, H is the height, W is the width. The data
type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError.
n (int, optional): The number of channels to sum over. Default: 5
k (float, optional): An offset, positive. Default: 1.0
alpha (float, optional): The scaling parameter, positive. Default:1e-4
beta (float, optional): The exponent, positive. Default:0.75
name (str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: A tensor variable storing the transformation result with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(
name="data", shape=[None, 3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
print(lrn.shape) # [-1, 3, 112, 112]
print(lrn.dtype) # float32
"""
helper = LayerHelper('lrn', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
dtype = helper.input_dtype()
input_shape = input.shape
dims = len(input_shape)
if dims != 4:
raise ValueError(
"Input's dimension size of Op(lrn) must be 4, but received %d." %
(dims))
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(lrn) got wrong value: received " +
data_format + " but only NCHW or NHWC supported.")
mid_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
lrn_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="lrn",
inputs={"X": input},
outputs={
"Out": lrn_out,
"MidOut": mid_out,
},
attrs={
"n": n,
"k": k,
"alpha": alpha,
"beta": beta,
"data_format": data_format
})
return lrn_out
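
# The cross-channel sum in the LRN formula above can be made concrete with a short
# NumPy reference for NCHW input. Illustrative sketch only; `_lrn_reference` is a
# hypothetical name, not part of the Paddle API.
def _lrn_reference(x, n=5, k=1.0, alpha=1e-4, beta=0.75):
    import numpy as np
    x = np.asarray(x, dtype='float64')  # expected layout: [N, C, H, W]
    out = np.empty_like(x)
    channels = x.shape[1]
    for c in range(channels):
        # Sum of squares over the local window of channels around c.
        lo = max(0, c - n // 2)
        hi = min(channels - 1, c + n // 2)
        square_sum = np.sum(np.square(x[:, lo:hi + 1, :, :]), axis=1)
        out[:, c, :, :] = x[:, c, :, :] / np.power(k + alpha * square_sum, beta)
    return out
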
def pad(x, paddings, pad_value=0., name=None):
r"""
:alias_main: paddle.nn.functional.pad
:alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad
:old_api: paddle.fluid.layers.pad
This op will pad a tensor with a constant value given by :attr:`pad_value`, and the
padded shape is specified by :attr:`paddings`.
Specifically, the number of values padded before the elements of :attr:`x`
in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number
of values padded after the elements of :attr:`x` in dimension :attr:`i` is
indicated by :attr:`paddings[2*i+1]`.
See below for an example.
.. code-block:: text
Given:
x = [[1, 2], [3, 4]]
paddings = [0, 1, 1, 2]
pad_value = 0
Return:
out = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]]
Args:
x (Variable): Tensor, data type is float32.
paddings (list): A list of integers. Its elements specify the padded
width before and after each dimension in turn.
The length of :attr:`paddings` must be equal to
:math:`rank(x) \\times 2`.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same data type and rank as :attr:`x`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 2 tensor variable
import paddle.fluid as fluid
x = fluid.data(name='data', shape=[300, 300], dtype='float32')
out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
"""
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], "pad")
helper = LayerHelper('pad', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad',
inputs={'X': x},
outputs={'Out': out},
attrs={'paddings': paddings,
'pad_value': float(pad_value)})
return out
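
# The flat `paddings` list above interleaves "before" and "after" amounts per axis
# (paddings[2*i], paddings[2*i+1]). The helper below shows the equivalent call with
# NumPy's pair-per-axis convention. Illustrative sketch only; `_pad_reference` is a
# hypothetical name, not part of the Paddle API.
def _pad_reference(x, paddings, pad_value=0.):
    import numpy as np
    x = np.asarray(x)
    assert len(paddings) == 2 * x.ndim, "paddings must hold 2 entries per axis"
    pad_width = [(paddings[2 * i], paddings[2 * i + 1]) for i in range(x.ndim)]
    return np.pad(x, pad_width, mode='constant', constant_values=pad_value)
# Example (illustrative): _pad_reference([[1, 2], [3, 4]], [0, 1, 1, 2]) reproduces
# the 3x5 result shown in the docstring above.
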
def pad_constant_like(x, y, pad_value=0., name=None):
r"""
Pad :attr:`y` with :attr:`pad_value`, the number of values padded to
the edges of each axis is specified by the difference of the shape
of :attr:`x` and :attr:`y` . ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n))
    specify padding widths for each axis. The input should be a k-D tensor (k > 0 and k < 7).
See below for an example.
.. code-block:: text
Given:
X = [[[[ 0, 1, 2],
[ 3, 4, 5]],
[[ 6, 7, 8],
[ 9, 10, 11]],
[[12, 13, 14],
[15, 16, 17]]],
[[[18, 19, 20],
[21, 22, 23]],
[[24, 25, 26],
[27, 28, 29]],
[[30, 31, 32],
[33, 34, 35]]]]
X.shape = (2, 3, 2, 3)
Y = [[[[35, 36, 37]],
[[38, 39, 40]],
[[41, 42, 43]]]]
Y.shape = (1, 3, 1, 3)
And
pad_value = 0.
Return:
Out = [[[[35, 36, 37],
[ 0, 0, 0]],
[[38, 39, 40],
[ 0, 0, 0]],
[[41, 42, 43],
[ 0, 0, 0]]],
[[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]]]]
Out.shape = [2, 3, 2, 3]
Args:
x (Variable): Tensor, its shape specifies the shape of output.
y (Variable): Tensor, its rank is the same with :attr:`x`, and for each dimension :math:`i` ,
:math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same shape as :attr:`x` and the same data type as :attr:`y`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
# y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
# out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
"""
check_type(x, 'x', (Variable), 'pad_constant_like')
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
"pad_constant_like")
helper = LayerHelper('pad_constant_like', **locals())
dtype = helper.input_dtype(input_param_name='y')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad_constant_like',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'pad_value': float(pad_value)})
return out
def label_smooth(label,
prior_dist=None,
epsilon=0.1,
dtype="float32",
name=None):
r"""
:alias_main: paddle.nn.functional.label_smooth
:alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth
:old_api: paddle.fluid.layers.label_smooth
Label smoothing is a mechanism to regularize the classifier layer and is called
label-smoothing regularization (LSR).
Label smoothing is proposed to encourage the model to be less confident,
since optimizing the log-likelihood of the correct label directly may
cause overfitting and reduce the ability of the model to adapt. Label
smoothing replaces the ground-truth label :math:`y` with the weighted sum
of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
i.e.
.. math::
\\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,
where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually
uniform distribution is used for :math:`\mu`.
See more details about label smoothing in https://arxiv.org/abs/1512.00567.
Parameters:
label(Variable): The input variable containing the label data. The
label data should use one-hot representation. It's
a multidimensional tensor with a shape of
:math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64".
prior_dist(Variable, optional): The prior distribution to be used to smooth
labels. If not provided, a uniform distribution
is used. It's a multidimensional tensor with a shape of
:math:`[1, class\_num]` . The default value is None.
epsilon(float, optional): The weight used to mix up the original ground-truth
distribution and the fixed distribution. The default value is
0.1.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set
as 'float32', 'float64'. The default value is 'float32'.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to
:ref:`api_guide_Name`.
Returns:
Variable: The tensor variable containing the smoothed labels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
label = layers.data(name="label", shape=[1], dtype="int32")
one_hot_label = layers.one_hot(input=label, depth=10)
smooth_label = layers.label_smooth(
label=one_hot_label, epsilon=0.1, dtype="float32")
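# As a worked check of the formula: with depth=10, epsilon=0.1 and a
# uniform prior, each one-hot "1" becomes (1-0.1)*1 + 0.1/10 = 0.91 and
# each "0" becomes 0.1/10 = 0.01.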
"""
if epsilon > 1. or epsilon < 0.:
raise ValueError("The value of epsilon must be between 0 and 1.")
if in_dygraph_mode():
return core.ops.label_smooth(label, prior_dist, 'epsilon',
float(epsilon))
check_variable_and_dtype(label, 'label', ['float32', 'float64'],
'label_smooth')
helper = LayerHelper("label_smooth", **locals())
label.stop_gradient = True
smooth_label = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="label_smooth",
inputs={"X": label,
"PriorDist": prior_dist} if prior_dist else {"X": label},
outputs={"Out": smooth_label},
attrs={"epsilon": float(epsilon)})
return smooth_label
@templatedoc()
def roi_pool(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_num=None,
name=None):
"""
This operator implements the roi_pooling layer.
Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
The operator has three steps:
1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height;
2. Finding the largest value in each section;
3. Copying these max values to the output buffer.
For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is Height, W is weight. The data type is float32 or float64.
rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
pooled_height (int, optional): The pooled output height, data type is int32. Default: 1
pooled_width (int, optional): The pooled output width, data type is int32. Default: 1
spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
rois_num (Tensor): The number of RoIs in each image. Default: None
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
DATATYPE='float32'
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE)
roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place)
rois_num_data = np.array([2]).astype('int32')
x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE)
rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE)
rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
pool_out = fluid.layers.roi_pool(
input=x,
rois=rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_num=rois_num)
exe = fluid.Executor(place)
out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_num': rois_num_data}, fetch_list=[pool_out.name])
print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32)
print(np.array(out).shape) # (2, 1, 1, 1)
"""
if in_dygraph_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
pool_out, argmaxes = core.ops.roi_pool(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale)
return pool_out, argmaxes
check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": input,
"ROIs": rois,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
helper.append_op(
type="roi_pool",
inputs=inputs,
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out
@templatedoc()
def roi_align(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
sampling_ratio=-1,
rois_num=None,
name=None):
"""
${comment}
Args:
input (Variable): ${x_comment}
rois (Variable): ROIs (Regions of Interest) to pool over.It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The
data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
(x1, y1) is the top left coordinates, and (x2, y2) is the bottom
right coordinates.
pooled_height (int32, optional): ${pooled_height_comment} Default: 1
pooled_width (int32, optional): ${pooled_width_comment} Default: 1
spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0
sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1
rois_num (Tensor): The number of RoIs in each image. Default: None
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
Output: ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(
name='data', shape=[None, 256, 32, 32], dtype='float32')
rois = fluid.data(
name='rois', shape=[None, 4], dtype='float32')
rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
align_out = fluid.layers.roi_align(input=x,
rois=rois,
pooled_height=7,
pooled_width=7,
spatial_scale=0.5,
sampling_ratio=-1,
rois_num=rois_num)
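# align_out should be a 4-D Tensor of shape [num_rois, 256, 7, 7],
# i.e. one pooled_height x pooled_width feature map per RoI and channel.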
"""
if in_dygraph_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
align_out = core.ops.roi_align(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale,
"sampling_ratio", sampling_ratio)
return align_out
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'roi_align')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')
helper = LayerHelper('roi_align', **locals())
dtype = helper.input_dtype()
align_out = helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"ROIs": rois,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
helper.append_op(
type="roi_align",
inputs=inputs,
outputs={"Out": align_out},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
"sampling_ratio": sampling_ratio
})
return align_out
def dice_loss(input, label, epsilon=0.00001, name=None):
r"""
Dice loss for comparing the similarity between the input predictions and the label.
This implementation is for binary classification, where the input is sigmoid
predictions of each pixel, usually used for segmentation task. The dice loss can
be defined as the following equation:
.. math::
dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
&= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
&= \\frac{(union\_area - intersection\_area)}{total\_area}
Parameters:
input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is
the batch_size, :math:`N_D` is 1. It is usually the output predictions of sigmoid activation.
The data type can be float32 or float64.
label (Tensor): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_D]`.
where :math:`N_1` is the batch_size, :math:`N_D` is 1. The data type can be float32 or float64.
epsilon (float): The epsilon will be added to the numerator and denominator.
If both input and label are empty, it makes sure dice is 1.
Default: 0.00001
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor with shape [1]; its data type is the same as `input`.
Example:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.randn((3,224,224,2))
label = paddle.randint(high=2, shape=(3,224,224,1))
predictions = F.softmax(x)
loss = F.dice_loss(input=predictions, label=label)
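# As a rough check of the formula: for a single pixel with predictions
# [0.2, 0.8] and one-hot label [0, 1], intersection = 0.8 and
# total_area = 1.0 + 1.0 = 2.0, so dice_loss = 1 - 2*0.8/2.0 = 0.2
# (ignoring epsilon).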
"""
label = one_hot(label, depth=input.shape[-1])
reduce_dim = list(range(1, len(input.shape)))
inse = reduce_sum(input * label, dim=reduce_dim)
dice_denominator = reduce_sum(
input, dim=reduce_dim) + reduce_sum(
label, dim=reduce_dim)
dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
return reduce_mean(dice_score)
def image_resize(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies to the three dimensions (depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future and only use :attr:`out_shape` instead.
Supporting resample methods:
'LINEAR' : Linear interpolation
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
'BICUBIC' : Bicubic interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation performs nearest neighbor interpolation
in both the 3rd dimension (height direction) and the 4th dimension (width
direction) of the input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
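For example, when one dimension is resized from in_size = 4 to out_size = 8,
the formulas above give scale_factor = (4 - 1)/(8 - 1) = 3/7 if
align_corners = True, and 4/8 = 0.5 otherwise.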
For details of linear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Linear_interpolation.
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
input (Variable): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape (list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
resample(str): The resample method. It supports 'LINEAR', 'BICUBIC', 'BILINEAR', 'TRILINEAR'
and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph constructing stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int)  :  An optional flag for linear/bilinear/trilinear interpolation. Refer to the formula in
the example code above: it can be \'0\' for src_idx = scale*(dst_index+0.5)-0.5,
or \'1\' for src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR',
'TRILINEAR', 'BICUBIC' or 'NEAREST' currently.
ValueError: 'LINEAR' only supports 3-D tensor.
ValueError: 'BICUBIC', 'BILINEAR' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only supports 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 1 for input 3-D tensor.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.image_resize(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.image_resize(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.image_resize(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'LINEAR': 'linear',
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
}
resample = resample.upper()
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' "
"or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample == 'LINEAR' and len(input.shape) != 3:
raise ValueError("'LINER only support 3-D tensor.")
elif resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4:
raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.")
elif resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCW` or `NWC` supported for 3-D input.")
elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_tuple_(data):
return isinstance(data, (list, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not _is_list_or_tuple_(out_shape):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 3:
if len(out_shape) != 1:
raise ValueError("out_shape length should be 1 for "
"input 3-D tensor.")
if contain_var:
attrs['out_w'] = size_list[0]
else:
out_shape = list(map(int, out_shape))
attrs['out_w'] = out_shape[0]
elif len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
@templatedoc(op_type="linear_interp")
def resize_linear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCW'):
"""
This op resizes the input by performing linear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in
the future and only use :attr:`out_shape` instead.
align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Parameters:
input(Variable): 3-D Tensor(NCW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize linear
layer, the shape is (out_w,). Default: None. If a list, each
element can be an integer or a Tensor Variable with shape: [1]. If a
Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(int): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`.
The default is `"NCW"`. When it is `"NCW"`, the data is stored in the order of:
`[batch_size, input_channels, input_width]`.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: 3-D tensor(NCW or NWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,100])
output = fluid.layers.resize_linear(input=input,out_shape=[50,])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(1,3,100).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
# (1, 3, 50)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_linear(input=input, out_shape=[50,])
print(output.shape)
# [1L, 3L, 50L]
"""
return image_resize(input, out_shape, scale, name, 'LINEAR', actual_shape,
align_corners, align_mode, data_format)
@templatedoc(op_type="bilinear_interp")
def resize_bilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes the input by performing bilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in
the future and only use :attr:`out_shape` instead.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation
align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize bilinear
layer, the shape is (out_h, out_w). Default: None. If a list, each
element can be an integer or a Tensor Variable with shape: [1]. If a
Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(int): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
align_corners, align_mode, data_format)
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCDHW'):
"""
This op resizes the input by performing trilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated
in the future and only use :attr:`out_shape` instead.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation
align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input depth, height or width.
At least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(int): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: A 5-D Tensor(NCDHW or NDHWC)
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import paddle
import numpy as np
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,8,10])
#1
output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])
#3
#x = np.array([3,12,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,8,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12, 12)
#2
# (2, 3, 12, 2, 4)
#3
# (2, 3, 3, 12, 12)
#4
# (2, 3, 3, 4, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
print(output.shape)
# [2L, 3L, 12L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'TRILINEAR',
actual_shape, align_corners, align_mode, data_format)
@templatedoc(op_type="nearest_interp")
def resize_nearest(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
data_format='NCHW'):
"""
This op resizes the input by performing nearest neighbor interpolation in both the
height direction and the width direction based on given output shape
which is specified by actual_shape, out_shape and scale in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future and only use :attr:`out_shape` instead.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor(H_{in} * scale_{factor})
W_out = floor(W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
Parameters:
input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_nearest(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(
input,
out_shape,
scale,
name,
'NEAREST',
actual_shape,
align_corners,
align_mode=1,
data_format=data_format)
def image_resize_short(input, out_short_len, resample='BILINEAR'):
"""
This op resizes a batch of images. The short edge of input images will be
resized to the given 'out_short_len'. The long edge of input images
will be resized proportionately to make images' length-width ratio
constant.
Parameters:
input (Variable): 4-D tensor (NCHW), the input tensor of the image resize layer.
out_short_len(int): The length of output images' short edge.
resample (str): resample method, default: BILINEAR.
Returns:
Variable: 4-D tensor(NCHW).
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
out = fluid.layers.image_resize_short(input, out_short_len=3)
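# The short edge (6) is resized to 3 and the long edge is scaled
# proportionally: int(9 * 3/6 + 0.5) = 5, so out has shape [None, 3, 3, 5].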
"""
in_shape = input.shape
if len(in_shape) != 4:
raise ValueError(
"The rank of input must be 4 (num_batches, channels, in_h, in_w).")
hw = in_shape[2:4]
short_idx = hw.index(min(hw))
long_idx = 1 - short_idx
out_shape = list(hw)
out_shape[short_idx] = out_short_len
out_shape[long_idx] = int(
float(out_shape[long_idx]) * (float(out_short_len) / float(hw[
short_idx])) + 0.5)
return image_resize(input=input, out_shape=out_shape, resample=resample)
@deprecated(since="2.0.0", update_to="paddle.gather")
def gather(input, index, overwrite=True):
"""
Output is obtained by gathering entries of the outer-most dimension
of X indexed by `index` and concatenating them together.
.. math::
Out = X[Index]
.. code-block:: text
Given:
X = [[1, 2],
[3, 4],
[5, 6]]
Index = [1, 2]
Then:
Out = [[3, 4],
[5, 6]]
Args:
input (Tensor): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
overwrite (bool, optional): The mode for updating the gradient when the index contains duplicate values.
If True, use the overwrite mode to update the gradient of the same index;
if False, use the accumulate mode to update the gradient of the same index.
Default value is True.
Returns:
output (Tensor): The output is a tensor with the same rank as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
output = fluid.layers.gather(x, index)
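The gather shown in the text example above can also be written with the
recommended ``paddle.gather`` replacement (a minimal dygraph sketch,
assuming a paddle 2.x environment):
.. code-block:: python
import paddle
# rows 1 and 2 of x are gathered along the outer-most dimension
x = paddle.to_tensor([[1., 2.], [3., 4.], [5., 6.]])
index = paddle.to_tensor([1, 2])
out = paddle.gather(x, index)  # [[3., 4.], [5., 6.]]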
"""
if in_dygraph_mode():
return core.ops.gather(input, index, None, 'overwrite', overwrite)
check_variable_and_dtype(
input, 'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather')
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather",
inputs={"X": input,
"Index": index},
outputs={"Out": out},
attrs={'overwrite': overwrite})
return out
@deprecated(since="2.0.0", update_to="paddle.gather_nd")
def gather_nd(input, index, name=None):
"""
**Gather Nd Layer**
This function is actually a high-dimensional extension of :code:`gather`
and supports simultaneous indexing by multiple axes. :attr:`index` is a
K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
tensor of indices into :attr:`input`, where each element defines
a slice of :attr:`input`:
.. math::
output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]
Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .
.. code-block:: text
Given:
input = [[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]
input.shape = (2, 3, 4)
* Case 1:
index = [[1]]
gather_nd(input, index)
= [input[1, :, :]]
= [[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]
* Case 2:
index = [[0,2]]
gather_nd(input, index)
= [input[0, 2, :]]
= [8, 9, 10, 11]
* Case 3:
index = [[1, 2, 3]]
gather_nd(input, index)
= [input[1, 2, 3]]
= [23]
Args:
input (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64.
index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.
Its dtype should be int32, int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
index = fluid.data(name='index', shape=[2, 2], dtype='int32')
output = fluid.layers.gather_nd(x, index)
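The Case 2 lookup above can also be reproduced with the recommended
``paddle.gather_nd`` replacement (a minimal dygraph sketch, assuming a
paddle 2.x environment):
.. code-block:: python
import paddle
x = paddle.to_tensor([[[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
                      [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]])
index = paddle.to_tensor([[0, 2]])
out = paddle.gather_nd(x, index)  # [[8, 9, 10, 11]]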
"""
if in_dygraph_mode():
return core.ops.gather_nd(input, index)
check_variable_and_dtype(input, 'input',
['bool', 'float32', 'float64', 'int32', 'int64'],
'gather_nd')
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_nd')
helper = LayerHelper('gather_nd', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather_nd",
inputs={"X": input,
"Index": index},
outputs={"Out": output})
return output
@deprecated(since="2.0.0", update_to="paddle.scatter")
def scatter(input, index, updates, name=None, overwrite=True):
"""
:alias_main: paddle.scatter
:alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter
:old_api: paddle.fluid.layers.scatter
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
import numpy as np
#input:
input = np.array([[1, 1], [2, 2], [3, 3]])
index = np.array([2, 1, 0, 1])
# the first dimension of updates should equal the length of index,
# and its trailing dimensions should match those of input
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
input[index[i]] = np.zeros((2))
for i in range(len(index)):
if (overwrite):
input[index[i]] = updates[i]
else:
input[index[i]] += updates[i]
# output:
out = np.array([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
Args:
input (Variable): The input N-D Tensor with rank>=1. Data type can be float32.
index (Variable): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates' length, and the values in index cannot exceed input's length.
updates (Variable): The tensor used to update the input based on index. Its first dimension should equal the length of index, and its trailing dimensions should match those of input.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
overwrite (bool): The mode for updating the output when the index contains duplicate values.
If True, use the overwrite mode to update the output of the same index,
if False, use the accumulate mode to update the output of the same index.
Default value is True.
Returns:
Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
output = fluid.layers.scatter(input, index, updates, overwrite=False)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
index_data = np.array([2, 1, 0, 1]).astype(np.int64)
update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
print(res)
# [array([[3., 3.],
# [6., 6.],
# [1., 1.]], dtype=float32)]
"""
helper = LayerHelper('scatter', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter",
inputs={"X": input,
"Ids": index,
"Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out})
return out
def scatter_nd_add(ref, index, updates, name=None):
r"""
**Scatter_nd_add Layer**
Output is obtained by applying sparse addition to a single value
or slice in a Variable.
:attr:`ref` is a Tensor with rank :math:`R`
and :attr:`index` is a Tensor with rank :math:`K` . Thus, :attr:`index`
has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
is a Tensor with rank :math:`K - 1 + R - Q` and its
shape is :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` .
According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
add the corresponding :attr:`updates` slice to the :attr:`ref` slice
which is obtained by the last one dimension of :attr:`index` .
.. code-block:: text
Given:
* Case 1:
ref = [0, 1, 2, 3, 4, 5]
index = [[1], [2], [3], [1]]
updates = [9, 10, 11, 12]
we get:
output = [0, 22, 12, 14, 4, 5]
* Case 2:
ref = [[65, 17], [-14, -25]]
index = [[], []]
updates = [[[-1, -2], [1, 2]],
[[3, 4], [-3, -4]]]
ref.shape = (2, 2)
index.shape = (2, 0)
updates.shape = (2, 2, 2)
we get:
output = [[67, 19], [-16, -27]]
Args:
ref (Variable): The ref input. Its dtype should be float32, float64.
index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank.
Its dtype should be int32 or int64 as it is used as indexes.
updates (Variable): The updated value of scatter_nd_add op, and it must have the same dtype
as ref. It must have the shape index.shape[:-1] + ref.shape[index.shape[-1]:].
name (str|None): The output variable name. If set None, the layer will be named automatically.
Returns:
output (Variable): The output is a tensor with the same shape and dtype as ref.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
index = fluid.data(name='index', shape=[3, 2], dtype='int32')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
output = fluid.layers.scatter_nd_add(ref, index, updates)
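Case 1 above can also be reproduced with the paddle 2.x dygraph API
(a minimal sketch, assuming ``paddle.scatter_nd_add`` is available):
.. code-block:: python
import paddle
x = paddle.to_tensor([0., 1., 2., 3., 4., 5.])
index = paddle.to_tensor([[1], [2], [3], [1]], dtype='int64')
updates = paddle.to_tensor([9., 10., 11., 12.])
out = paddle.scatter_nd_add(x, index, updates)
# out: [0., 22., 12., 14., 4., 5.]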
"""
if in_dygraph_mode():
return core.ops.scatter_nd_add(ref, index, updates)
if ref.dtype != updates.dtype:
raise ValueError("ref and updates must have same data type.")
helper = LayerHelper('scatter_nd_add', **locals())
dtype = helper.input_dtype(input_param_name='ref')
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter_nd_add",
inputs={"X": ref,
"Index": index,
"Updates": updates},
outputs={"Out": output})
return output
def scatter_nd(index, updates, shape, name=None):
"""
**Scatter_nd Layer**
Output is obtained by scattering the :attr:`updates` in a new tensor according
to :attr:`index` . This op is similar to :code:`scatter_nd_add`, except the
tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)`
is equal to :code:`scatter_nd_add(paddle.zeros(shape, updates.dtype), index, updates)` .
If :attr:`index` has repeated elements, then the corresponding updates are accumulated.
Because of the numerical approximation issues, the different order of repeated elements
in :attr:`index` may cause different results. The specific calculation method can be
seen :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op.
Args:
index (Tensor): The index input with ndim > 1 and index.shape[-1] <= len(shape).
Its dtype should be int32 or int64 as it is used as indexes.
updates (Tensor): The updated value of scatter_nd op. Its dtype should be float32, float64.
It must have the shape index.shape[:-1] + shape[index.shape[-1]:]
shape(tuple|list): Shape of output tensor.
name (str|None): The output Tensor name. If set None, the layer will be named automatically.
Returns:
output (Tensor): The output is a tensor with the same type as :attr:`updates` .
Examples:
.. code-block:: python
import paddle
import numpy as np
index_data = np.array([[1, 1],
[0, 1],
[1, 3]]).astype(np.int64)
index = paddle.to_tensor(index_data)
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = paddle.scatter_nd(index, updates, shape)
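# A deterministic sketch (illustrative only): repeated indices accumulate.
index2 = paddle.to_tensor([[1], [1], [3]], dtype='int64')
updates2 = paddle.to_tensor([1., 2., 3.])
output2 = paddle.scatter_nd(index2, updates2, shape=[5])
# output2: [0., 3., 0., 3., 0.]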
"""
return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name)
@templatedoc()
def random_crop(x, shape, seed=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
shape(${shape_type}): ${shape_comment}
seed(int|${seed_type}|None): ${seed_comment} By default, the seed will
get from `random.randint(-65536, 65535)`.
Returns:
${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
img = fluid.data("img", [None, 3, 256, 256])
# cropped_img is [-1, 3, 224, 224]
cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])
# cropped_img2 shape: [-1, 2, 224, 224]
# cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])
# cropped_img3 shape: [-1, 3, 128, 224]
# cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])
"""
helper = LayerHelper("random_crop", **locals())
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'uint8', 'int16', 'int32'],
'random_crop')
check_type(shape, 'shape', (list, Variable), 'random_crop')
dtype = x.dtype
out = helper.create_variable_for_type_inference(dtype)
if seed is None:
seed = np.random.randint(-65536, 65536)
op_attrs = {"shape": shape}
if isinstance(seed, int):
op_attrs["startup_seed"] = seed
seed = helper.create_variable(
name=unique_name.generate("random_crop_seed"),
dtype="int64",
persistable=True)
elif not isinstance(seed, Variable):
raise ValueError("'seed' must be a Variable or an int.")
helper.append_op(
type="random_crop",
inputs={"X": x,
"Seed": seed},
outputs={"Out": out,
"SeedOut": seed},
attrs=op_attrs)
return out
def log(x, name=None):
r"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
Out = \\ln(x)
Args:
x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The natural log of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
x = [[2,3,4], [7,8,9]]
x = paddle.to_tensor(x, dtype='float32')
res = paddle.log(x)
# [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
"""
if in_dygraph_mode():
return core.ops.log(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
inputs = {'X': [x]}
helper = LayerHelper('log', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
def relu(x, name=None):
"""
${comment}
Args:
x(Variable): ${x_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[1,2.6]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu(x1)
print(out1.numpy())
# [[0. 0. ]
# [1. 2.6]]
"""
if in_dygraph_mode():
return core.ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
inputs = {'X': [x]}
helper = LayerHelper('relu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
def selu(x, scale=None, alpha=None, name=None):
r"""
Selu Operator.
The equation is:
.. math::
selu= \\lambda*
\\begin{cases}
x &\\quad \\text{ if } x>0 \\\\
\\alpha * e^x - \\alpha &\\quad \\text{ if } x<=0
\\end{cases}
The input `X` can carry the LoD (Level of Details) information,
or not. And the output shares the LoD information with input `X`.
Args:
x (Variable): The input N-D Tensor.
scale(float, optional): lambda in selu activation function,
the default value is 1.0507009873554804934193349852946.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
alpha(float, optional): alpha in selu activation function,
the default value is 1.6732632423543772848170429916717.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.selu(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0. , 1.050701],[2.101402, 3.152103]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')
helper = LayerHelper('selu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
attrs = {}
if scale is not None:
attrs["scale"] = scale
if alpha is not None:
attrs["alpha"] = alpha
helper.append_op(
type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs)
return out
def mean_iou(input, label, num_classes):
r"""
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
.. math::
IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.
The predictions are accumulated in a confusion matrix and mean-IOU
is then calculated from it.
Parameters:
input (Tensor): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
label (Tensor): A Tensor of ground truth labels with type int32 or int64.
Its shape should be the same as input.
num_classes (int32): The possible number of labels.
Returns:
Three Tensors.
- mean_iou(Tensor) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
Data type is float32.
- out_wrong(Tensor) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
The wrong numbers of each class.
- out_correct(Tensor): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
Examples:
.. code-block:: python
import paddle
iou_shape = [64, 32, 32]
num_classes = 5
predict = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
label = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes)
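# A small deterministic sketch (illustrative only; values chosen by hand):
pred = paddle.to_tensor([0, 1, 1], dtype='int32')
gt = paddle.to_tensor([0, 1, 0], dtype='int32')
miou, wrong, correct = paddle.metric.mean_iou(pred, gt, 2)
# each class has IoU 1/2, so miou is 0.5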
"""
if in_dygraph_mode():
return core.ops.mean_iou(input, label, 'num_classes', num_classes)
helper = LayerHelper('mean_iou', **locals())
check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
'mean_iou')
check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou')
out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
out_wrong = helper.create_variable_for_type_inference(dtype='int32')
out_correct = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="mean_iou",
inputs={"Predictions": input,
"Labels": label},
outputs={
"OutMeanIou": out_mean_iou,
"OutWrong": out_wrong,
"OutCorrect": out_correct
},
attrs={"num_classes": num_classes})
return out_mean_iou, out_wrong, out_correct
def crop(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
**Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.
.. code-block:: text
* Case 1:
Given
X = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]],
and
shape = [2, 2],
offsets = [0, 1],
output is:
Out = [[1, 2],
[3, 4]].
* Case 2:
Given
X = [[0, 1, 2, 5, 0]
[0, 3, 4, 6, 0]
[0, 0, 0, 0, 0]],
and shape is tensor
shape = [[0, 0, 0]
[0, 0, 0]]
and
offsets = [0, 1],
output is:
Out = [[1, 2, 5],
[3, 4, 6]].
Parameters:
x (Variable): Tensor, data type can be float32 or float64.
shape (Variable|list/tuple of integers): The output shape is specified
by `shape`, which can be a Tensor or a list/tuple of integers.
If it is a Tensor, its rank must be the same as `x`; only
its shape will be used, and its values will be ignored. This way
is suitable for the case that the output shape may be changed each
iteration. If it is a list/tuple of integers, its length must be the same
as the rank of `x`.
offsets (Variable|list/tuple of integers|None): Specifies the cropping
offsets at each dimension. It can be a Tensor or a list/tuple
of integers. If it is a Tensor, its rank must be the same as `x`.
This way is suitable for the case that the offsets may be changed
each iteration. If it is a list/tuple of integers, its length must be the
same as the rank of `x`. If None, the offsets are 0 at each dimension.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name` . Usually name is no need to set and
None by default.
Returns:
The cropped Tensor, which has the same rank and data type as `x`
Return Type:
Variable
Raises:
ValueError: If shape is not a list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
crop = fluid.layers.crop(x, shape=y)
# or
z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[2, 2, 3])
"""
check_variable_and_dtype(x, 'x', ['float32'], 'crop')
check_type(shape, 'shape', (list, tuple, Variable), 'crop')
helper = LayerHelper('crop', **locals())
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
if isinstance(shape, Variable):
ipts['Y'] = shape
else:
attrs['shape'] = shape
if isinstance(offsets, Variable):
ipts['Offsets'] = offsets
else:
attrs['offsets'] = offsets
helper.append_op(
type='crop',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def crop_tensor(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
.. code-block:: text
* Case 1 (input is a 2-D Tensor):
Input:
X.shape = [3, 5]
X.data = [[0, 1, 2, 0, 0],
[0, 3, 4, 0, 0],
[0, 0, 0, 0, 0]]
Parameters:
shape = [2, 2]
offsets = [0, 1]
Output:
Out.shape = [2, 2]
Out.data = [[1, 2],
[3, 4]]
* Case 2 (input is a 3-D Tensor):
Input:
X.shape = [2, 3, 4]
X.data = [[[0, 1, 2, 3],
[0, 5, 6, 7],
[0, 0, 0, 0]],
[[0, 3, 4, 5],
[0, 6, 7, 8],
[0, 0, 0, 0]]]
Parameters:
shape = [2, 2, -1]
offsets = [0, 0, 1]
Output:
Out.shape = [2, 2, 3]
Out.data = [[[1, 2, 3],
[5, 6, 7]],
[[3, 4, 5],
[6, 7, 8]]]
Parameters:
x (Variable): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
shape (list|tuple|Variable): The output shape is specified
by `shape`. Its data type is int32. If a list/tuple, its length must be
the same as the dimension size of `x`. If a Variable, it should be a 1-D Tensor.
When it is a list, each element can be an integer or a Tensor of shape [1].
If it contains Variables, it is suitable for the case that the shape may
be changed each iteration.
offsets (list|tuple|Variable, optional): Specifies the cropping
offsets at each dimension. Its data type is int32. If a list/tuple, its length
must be the same as the dimension size of `x`. If a Variable, it should be a 1-D
Tensor. When it is a list, each element can be an integer or a Tensor of shape [1].
If it contains Variables, it is suitable for the case that the offsets may be changed
each iteration. Default: None, the offsets are 0 at each dimension.
name(str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: The cropped Tensor has the same data type as `x`.
Raises:
TypeError: If the data type of `x` is not in: float32, float64, int32, int64.
TypeError: If `shape` is not a list, tuple or Variable.
TypeError: If the data type of `shape` is not int32.
TypeError: If `offsets` is not None and not a list, tuple or Variable.
TypeError: If the data type of `offsets` is not int32.
ValueError: If the element in `offsets` is less than zero.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32")
# x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime.
# shape is a 1-D Tensor
crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32")
crop0 = fluid.layers.crop_tensor(x, shape=crop_shape)
# crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime.
# or shape is a list in which each element is a constant
crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0])
# crop1.shape = [-1, 2, 3]
# or shape is a list in which each element is a constant or Variable
y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32")
dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4])
# crop2.shape = [3, -1, 4]
# offsets is a 1-D Tensor
crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32")
crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets)
# crop3.shape = [-1, 2, 3]
# offsets is a list in which each element is a constant or Variable
offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32")
crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var])
# crop4.shape = [-1, 2, 3]
"""
helper = LayerHelper('crop_tensor', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'crop_tensor')
check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
'crop_tensor')
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
def _attr_shape_check(shape_val):
if not isinstance(shape_val, int):
raise TypeError(
"Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(shape_val))
if shape_val == 0:
raise ValueError(
"Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
% str(shape_val))
if shape_val < -1:
raise ValueError(
"When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
% str(shape_val))
def _attr_offsets_check(offset_val):
if not isinstance(offset_val, int):
raise TypeError(
"Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(offset_val))
if offset_val < 0:
raise ValueError(
"Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
% str(offset_val))
if isinstance(offsets, Variable):
offsets.stop_gradient = True
ipts['Offsets'] = offsets
attrs['offsets'] = [-1] * len(x.shape)
elif utils._contain_var(offsets):
new_offsets_tensor = []
offsets_attr = []
for dim in offsets:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_offsets_tensor.append(dim)
offsets_attr.append(-1)
else:
_attr_offsets_check(dim)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_offsets_tensor.append(temp_out)
offsets_attr.append(dim)
ipts['OffsetsTensor'] = new_offsets_tensor
attrs['offsets'] = offsets_attr
else:
for offset in offsets:
_attr_offsets_check(offset)
attrs['offsets'] = offsets
if isinstance(shape, Variable):
shape.stop_gradient = True
ipts['Shape'] = shape
elif utils._contain_var(shape):
new_shape_tensor = []
shape_attr = []
for dim_size in shape:
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
new_shape_tensor.append(dim_size)
shape_attr.append(0)
else:
_attr_shape_check(dim_size)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
shape_attr.append(dim_size)
ipts['ShapeTensor'] = new_shape_tensor
attrs['shape'] = shape_attr
else:
for dim_size in shape:
_attr_shape_check(dim_size)
attrs['shape'] = shape
helper.append_op(
type='crop_tensor',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def affine_grid(theta, out_shape, name=None):
"""
:alias_main: paddle.nn.functional.affine_grid
:alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid
:old_api: paddle.fluid.layers.affine_grid
It generates a grid of (x,y) coordinates using the parameters of
the affine transformation that correspond to a set of points where
the input feature map should be sampled to produce the transformed
output feature map.
Args:
theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters.
The data type can be float32 or float64.
out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width].
``out_shape`` can be a Tensor or a list or tuple. The data
type must be int32.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor with shape [batch_size, H, W, 2] while 'H' and 'W' are the height and width of feature map in affine transformation. The data type is the same as `theta`.
Raises:
ValueError: If the type of arguments is not supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.CPUPlace()
theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32")
out_shape = fluid.data(name="y", shape=[4], dtype="int32")
grid_0 = fluid.layers.affine_grid(theta, out_shape)
grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28])
batch_size=2
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"),
"y": np.array([5, 3, 28, 28]).astype("int32")},
fetch_list=[grid_0.name, grid_1.name])
print(output[0])
print(output[1])
"""
helper = LayerHelper('affine_grid')
check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
'affine_grid')
if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
isinstance(out_shape, Variable)):
raise ValueError("The out_shape should be a list, tuple or Variable.")
if not isinstance(theta, Variable):
raise ValueError("The theta should be a Variable.")
out = helper.create_variable_for_type_inference(theta.dtype)
ipts = {'Theta': theta}
attrs = {}
if isinstance(out_shape, Variable):
ipts['OutputShape'] = out_shape
check_variable_and_dtype(out_shape, 'out_shape', ['int32'],
'affine_grid')
else:
attrs['output_shape'] = out_shape
helper.append_op(
type='affine_grid',
inputs=ipts,
outputs={'Output': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def pad2d(input,
paddings=[0, 0, 0, 0],
mode='constant',
pad_value=0.0,
data_format="NCHW",
name=None):
"""
Pad 2-d images according to 'paddings' and 'mode'.
If mode is 'reflect', paddings[0] and paddings[1] must be no greater
than height-1. And the width dimension has the same condition.
Parameters:
input (Tensor): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
paddings (Tensor | List[int32]): The padding size. If padding is a List, it must
contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
Otherwise, it is a 1-D Tensor with shape [4]. Data type is int32.
Default is [0, 0, 0, 0].
mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
When in 'constant' mode, this op uses a constant value to pad the input tensor.
When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
When in 'edge' mode, uses input boundaries to pad the input tensor.
Default is 'constant'.
pad_value (float32): The value to fill the padded areas in 'constant' mode. Default is 0.0.
data_format (str): A string from: "NHWC", "NCHW". Specify the data format of
the input data.
Default is "NCHW".
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns: Tensor, a 4-D Tensor padded according to paddings and mode, with the same data type as the input.
Examples:
.. code-block:: text
Input = [[[[1., 2., 3.],
[4., 5., 6.]]]]
Case 0:
paddings = [0, 1, 2, 3],
mode = 'constant'
pad_value = 0
Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
[0., 0., 4., 5., 6., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]
Case 1:
paddings = [0, 1, 2, 1],
mode = 'reflect'
Out = [[[[3., 2., 1., 2., 3., 2.],
[6., 5., 4., 5., 6., 5.],
[3., 2., 1., 2., 3., 2.]]]]
Case 2:
paddings = [0, 1, 2, 1],
mode = 'edge'
Out = [[[[1., 1., 1., 2., 3., 3.],
[4., 4., 4., 5., 6., 6.],
[4., 4., 4., 5., 6., 6.]]]]
Code Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
# example 1
x_shape = (1, 1, 3, 4)
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
tensor_x = paddle.to_tensor(x)
y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
print(y.numpy())
# [[[[ 1. 1. 1. 1. 1. 1. 1.]
# [ 1. 1. 1. 2. 3. 4. 1.]
# [ 1. 1. 5. 6. 7. 8. 1.]
# [ 1. 1. 9. 10. 11. 12. 1.]
# [ 1. 1. 1. 1. 1. 1. 1.]
# [ 1. 1. 1. 1. 1. 1. 1.]]]]
# example 2
x_shape = (1, 1, 2, 3)
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
tensor_x = paddle.to_tensor(x)
y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 1, 1, 1], mode='reflect')
print(y.numpy())
# [[[[5. 4. 5. 6. 5.]
# [2. 1. 2. 3. 2.]
# [5. 4. 5. 6. 5.]
# [2. 1. 2. 3. 2.]]]]
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
"pad2d")
if in_dygraph_mode():
_paddings = paddings.numpy().tolist() if isinstance(
paddings, Variable) else paddings
return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
'data_format', data_format, 'paddings', _paddings)
attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
inputs = {'X': [input]}
if isinstance(paddings, Variable):
inputs['Paddings'] = [paddings]
attrs['paddings'] = []
else:
attrs['paddings'] = paddings
helper = LayerHelper('pad2d', **locals())
assert mode in ['reflect', 'edge', 'constant'
], "mode should be one of constant, reflect, edge."
dtype = helper.input_dtype(input_param_name='input')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
def elu(x, alpha=1.0, name=None):
"""
:alias_main: paddle.nn.functional.elu
:alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
:old_api: paddle.fluid.layers.elu
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|1.0): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
input_elu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_elu)
y = fluid.layers.elu(x, alpha=0.2)
print(y.numpy())
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
helper = LayerHelper('elu', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
def relu6(x, threshold=6.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
threshold(float, optional): ${threshold_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[2.5,7.8]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu6(x=x1, threshold=6.0)
print(out1.numpy())
# [[0. 0. ]
# [2.5 6. ]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='relu6',
inputs={'X': x},
outputs={'Out': out},
attrs={
'threshold': threshold,
'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
})
return out
@templatedoc()
def pow(x, factor=1.0, name=None):
"""
This is Pow Activation Operator.
:math:`out = x^{factor}`
Args:
x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[32,32], dtype="float32")
# example 1: argument factor is float
y_1 = fluid.layers.pow(x, factor=2.0)
# y_1 is x^{2.0}
# example 2: argument factor is Variable
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor)
# y_2 is x^{3.0}
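# For example (an illustrative note, not produced by this program):
# if x were [[1., 2.], [3., 4.]], then y_1 would be [[1., 4.], [9., 16.]]
# and y_2 would be [[1., 8.], [27., 64.]].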
"""
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
'pow')
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {}
if isinstance(factor, Variable):
check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
factor.stop_gradient = True
inputs['FactorTensor'] = factor
else:
attrs['factor'] = factor
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
stanh activation.
.. math::
out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
scale_a (float, optional): The scale factor a of the input. Default is 0.67.
scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]
"""
if in_dygraph_mode():
return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
helper = LayerHelper('stanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='stanh',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale_a': scale_a,
'scale_b': scale_b})
return out
@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
"""
${comment}
Parameters:
x (${x_type}): ${x_comment}
slope (float, optional): ${slope_comment}
offset (float, optional): ${offset_comment}
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
"""
if in_dygraph_mode():
return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_sigmoid')
helper = LayerHelper('hard_sigmoid', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_sigmoid',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': slope,
'offset': offset})
return out
@templatedoc()
def swish(x, beta=1.0, name=None):
r"""
:alias_main: paddle.nn.functional.swish
:alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
:old_api: paddle.fluid.layers.swish
Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
Equation:
.. math::
out = \\frac{x}{1 + e^{- beta * x}}
Args:
x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
beta(float): Constant beta of swish operator, default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape as the input x.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
y = fluid.layers.swish(x, beta=2.0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
data = np.random.randn(2, 3).astype("float32")
exe.run(start)
y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
data
# array([[-1.1239197 , 1.3391294 , 0.03921051],
# [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32)
y_np
# array([[-0.2756806 , 1.0610548 , 0.01998957],
# [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
data = np.random.randn(2, 3).astype("float32")
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = dg.to_variable(data)
y = fluid.layers.swish(x)
y_np = y.numpy()
data
# array([[-0.0816701 , 1.1603649 , -0.88325626],
# [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32)
y_np
# array([[-0.03916847, 0.8835007 , -0.25835553],
# [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32)
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': beta})
return out
@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, name=None):
r"""
prelu activation.
.. math::
prelu(x) = max(0, x) + \\alpha * min(0, x)
There are three modes for the activation:
.. code-block:: text
all: All elements share same alpha.
channel: Elements in same channel share same alpha.
element: All elements do not share alpha. Each element has its own alpha.
Parameters:
x (Tensor): The input Tensor or LoDTensor with data type float32.
mode (str): The mode for weight sharing.
param_attr (ParamAttr|None, optional): The parameter attribute for the learnable
weight (alpha), it can be create by ParamAttr. None by default.
For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A tensor with the same shape and data type as x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1., 2., 3.])
param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2))
out = paddle.static.nn.prelu(x, 'all', param)
# [-0.2, 2., 3.]
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
helper = LayerHelper('prelu', **locals())
if mode not in ['all', 'channel', 'element']:
raise ValueError('mode should be one of all, channel, element.')
alpha_shape = [1]
# NOTE(): The input of this API should be ``N,C,...`` format,
# which means x.shape[0] is batch_size and x.shape[1] is channel.
if mode == 'channel':
assert len(
x.shape
) >= 2, "The size of input shape should be equal to or larger than 2 in prelu() when mode is 'channel'"
#NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]).
# To be consistent with Prelu, it is simplified.
#NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
alpha_shape = [1, x.shape[1], 1, 1]
elif mode == 'element':
assert len(
x.shape
) >= 1, "The size of input shape should be equal to or larger than 1 in prelu() when mode is 'element'"
alpha_shape = [1] + list(x.shape)[1:]
dtype = helper.input_dtype(input_param_name='x')
alpha = helper.create_parameter(
attr=helper.param_attr,
shape=alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(0.25))
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prelu",
inputs={"X": x,
'Alpha': alpha},
attrs={"mode": mode},
outputs={"Out": out})
return out
@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
t_min(${t_min_type}|0.0): ${t_min_comment}
t_max(${t_max_type}|24.0): ${t_max_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
paddle.enable_static()
input_brelu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_brelu)
y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
print(y.numpy())
#[[ 1. 6.]
#[ 1. 10.]]
"""
if in_dygraph_mode():
return core.ops.brelu(x, 't_min', t_min, 't_max', t_max)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
helper = LayerHelper('brelu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='brelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'t_min': t_min,
't_max': t_max})
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|0.02): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
print(y) # [[-0.1, 2], [3, -0.4]]
"""
return paddle.nn.functional.leaky_relu(x, alpha, name)
def soft_relu(x, threshold=40.0, name=None):
r"""
SoftRelu Activation Operator.
$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$
Args:
x(Variable): Input of soft_relu operator. Data type can be float32, float64.
threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): Output of soft_relu operator, with the same shape and LoD as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.soft_relu(inputs, threshold=20.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'soft_relu')
helper = LayerHelper('soft_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='soft_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def flatten(x, axis=1, name=None):
r"""
**Flatten op**
Flatten the input tensor into a 2D matrix.
For Example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
axis = 2
We get:
Out.shape = (3 * 100, 4 * 100)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
axis = 0
We get:
Out.shape = (1, 3 * 100 * 100 * 4)
Args:
x (Variable): A tensor of rank >= axis. A tensor with type float32,
float64, int8, int32, int64.
axis (int): Indicate up to which input dimensions (exclusive) should
be flattened to the outer dimension of the output.
The value for axis must be in the range [0, R], where R
is the rank of the input tensor. Default: 1.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: A 2D tensor with the contents of the input tensor, with input \
dimensions up to axis flattened to the outer dimension of \
the output and remaining input dimensions flattened into the \
inner dimension of the output. A Tensor with type same as input x.
Raises:
ValueError: If x is not a variable.
ValueError: If axis is not in range [0, rank(x)].
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
# x shape is [4, 4, 3]
out = fluid.layers.flatten(x=x, axis=2)
# out shape is [16, 3]
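# Shape arithmetic (illustrative note): with axis=2 the leading dims are
# merged into the rows and the trailing dims into the columns,
# so out.shape == [4 * 4, 3] == [16, 3].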
"""
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
helper = LayerHelper('flatten', **locals())
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Variable")
if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
raise ValueError("The axis should be a int, and in range [0, rank(x)]")
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten2',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"axis": axis})
return out
def stack(x, axis=0, name=None):
"""
This OP stacks all the inputs :code:`x` along axis.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (list(Variable)|tuple(Variable)): Input :code:`x` can be a :code:`list` or :code:`tuple` of Tensors, the shapes of all these Tensors
must be the same. Supposing input is N dims
Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims
Tensor :math:`[d_0, d_1, d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
Supported data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: The stacked Tensor, which has the same data type as the input Tensors. Output dim is :math:`rank(x[0])+1`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32')
x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32')
# stack Tensor list
data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2]
data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2]
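# A numeric sketch of Case 1 above (illustrative only; runs in dygraph mode):
import numpy as np
with fluid.dygraph.guard():
    a = fluid.dygraph.to_variable(np.array([[1.0, 2.0]], dtype='float32'))
    b = fluid.dygraph.to_variable(np.array([[3.0, 4.0]], dtype='float32'))
    c = fluid.dygraph.to_variable(np.array([[5.0, 6.0]], dtype='float32'))
    out = layers.stack([a, b, c], axis=0)  # out.shape == [3, 1, 2]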
"""
axis = 0 if axis is None else axis
if in_dygraph_mode():
return core.ops.stack(x, 'axis', axis)
if not isinstance(x, list) and not isinstance(x, tuple):
# NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
# In that case, Variable is array of tensors indeed.
if isinstance(x, Variable) and x.desc.type(
) == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
x = [x]
else:
raise TypeError("The type of '%s' in %s must be %s, but received %s"
% ('x', 'stack',
'list[Tensor], tuple[Tensor] or TensorArray',
type(x)))
helper = LayerHelper('stack', **locals())
out = helper.create_variable_for_type_inference(x[0].dtype)
if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
for i in x:
check_variable_and_dtype(i, 'x', \
['float16', 'float32', 'float64', 'int32', 'int64'], 'stack')
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': True})
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis})
return out
@templatedoc(op_type="filter_by_instag")
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
"""
**Filter By Instag Layer**
This function filters a batch of ins by instag.
There are multiple ins, and every ins belongs to some tags.
We can specify the tags we want, so the ins which belong to those tags
remain in the output, and the others are removed.
For example, one batch has 4 ins. Every ins has its tag list.
| Ins | Ins_Tag |
|:-----:|:------:|
| 0 | 0, 1 |
| 1 | 1, 3 |
| 2 | 0, 3 |
| 3 | 2, 6 |
And Lod is [1,1,1,1]
And the filter tags [1]
From the definition above, ins which have tag 1 can pass the filter,
so Ins 0 and Ins 1 can pass and appear in the output,
while Ins 2 and Ins 3 cannot pass because they do not have tag 1.
Actually, if is_lod is false, the input is a normal tensor, which is equivalent to
a lod_tensor whose lod is all 1, similar to the example above.
Args:
ins (Variable): Input Variable (LoDTensor), usually a 2-D tensor
whose first dimension may or may not carry LoD info.
ins_tag (Variable): Input Variable (LoDTensor), usually a 1-D list
that is split by the LoD info.
filter_tag (Variable): Input Variable (1-D Tensor/List), usually a
list that holds the tags.
is_lod (Bool): Boolean value to indicate whether ins is a LoD tensor.
out_val_if_empty(Int64): If the output after filtering is empty, this value
will be set into the output tensor.
Returns:
Variable: filtered ins (LoDTensor) and loss weight (Tensor)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64')
ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64')
out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True)
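# Illustrative reading of the table above: with filter_tag holding [1],
# only ins 0 and ins 1 would remain in `out`.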
"""
helper = LayerHelper('filter_by_instag', **locals())
out = helper.create_variable_for_type_inference(dtype=ins.dtype)
loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
helper.append_op(
type='filter_by_instag',
inputs={'Ins': ins,
'Ins_tag': ins_tag,
'Filter_tag': filter_tag},
outputs={'Out': out,
'LossWeight': loss_weight,
'IndexMap': mmap},
attrs={'is_lod': is_lod,
'out_val_if_empty': out_val_if_empty})
return [out, loss_weight]
def unstack(x, axis=0, num=None):
"""
:alias_main: paddle.unstack
:alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack
:old_api: paddle.fluid.layers.unstack
**UnStack Layer**
This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
raised.
Args:
x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
axis (int): The axis along which the input is unstacked.
num (int|None): The number of output variables.
Returns:
list(Tensor): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
Raises:
ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D).
Examples:
.. code-block:: python
import paddle
x = paddle.ones(name='x', shape=[2, 3, 5], dtype='float32') # create a tensor with shape=[2, 3, 5]
y = paddle.unstack(x, axis=1) # unstack with second axis, which results 3 tensors with shape=[2, 5]
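# A concrete sketch (illustrative only):
z = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
z0, z1 = paddle.unstack(z, axis=0)
# z0: [1., 2., 3.], z1: [4., 5., 6.]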
"""
if in_dygraph_mode():
if num is None:
num = x.shape[axis]
return core.ops.unstack(x, num, 'axis', int(axis), 'num', num)
helper = LayerHelper('unstack', **locals())
if num is None:
if axis is None or x.shape[axis] <= 0:
raise ValueError('unknown unstack number')
else:
num = x.shape[axis]
outs = []
for _ in range(num):
outs.append(helper.create_variable_for_type_inference(x.dtype))
helper.append_op(
type='unstack',
inputs={'X': [x]},
outputs={'Y': outs},
attrs={'axis': axis,
'num': num})
return outs
@deprecated(since='2.0.0', update_to="paddle.expand")
def expand(x, expand_times, name=None):
"""
:alias_main: paddle.expand
:alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand
:old_api: paddle.fluid.layers.expand
This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
The rank of ``x`` should be less than or equal to 6. Please note that the size of ``expand_times`` must be the same
as the rank of ``x``. Following is a using case:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
Attr(expand_times): [1, 2, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``expand_times`` is a Variable, it should be a 1-D Tensor.
Expand times number for each dimension of ``x`` .
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
Raises:
TypeError: The type of ``expand_times`` must be list, tuple or Variable.
ValueError: The elements of ``expand_times`` cannot be negative.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
# the shape of expanded_1 is [2, 6, 2].
# example 2:
data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
# the shape of expanded_2 is [48, 56].
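# A numeric sketch of the text case above (illustrative only; runs in dygraph mode):
import numpy as np
with fluid.dygraph.guard():
    data_3 = fluid.dygraph.to_variable(
        np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype='int32'))
    expanded_3 = fluid.layers.expand(data_3, expand_times=[1, 2, 2])
    # expanded_3.shape == [2, 6, 2]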
"""
if in_dygraph_mode():
attrs = ()
expand_times_tensor = None
if isinstance(expand_times, (list, tuple)):
expand_times = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in expand_times
]
attrs += ('expand_times', expand_times)
elif isinstance(expand_times, Variable):
expand_times_tensor = expand_times
expand_times_tensor.stop_gradient = True
return core.ops.expand(x, expand_times_tensor, *attrs)
inputs = {"X": [x]}
attrs = {}
check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
raise ValueError(
"expand op bool data type must set the stop_gradient to be False")
helper = LayerHelper('expand', input=x, **locals())
def get_attr_expand_times(list_expand_times):
attrs_expand_times = []
for idx, times in enumerate(list_expand_times):
if isinstance(times, Variable):
attrs_expand_times.append(-1)
else:
attrs_expand_times.append(times)
assert times > 0, (
"Each element given in expand_times must be positive.")
return attrs_expand_times
if isinstance(expand_times, Variable):
expand_times.stop_gradient = True
inputs['ExpandTimes'] = expand_times
elif isinstance(expand_times, (list, tuple)):
attrs['expand_times'] = get_attr_expand_times(expand_times)
if utils._contain_var(expand_times):
inputs['expand_times_tensor'] = utils._convert_to_tensor_list(
expand_times)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@deprecated(since='2.0.0', update_to="paddle.expand_as")
def expand_as(x, target_tensor, name=None):
"""
:alias_main: paddle.expand_as
:alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
:old_api: paddle.fluid.layers.expand_as
expand_as operator tiles the input according to the given target tensor. You should set the target size
for each dimension by providing the tensor 'target_tensor'. The rank of X
should be in [1, 6]. Please note that the rank of 'target_tensor' must be the same
as the rank of X. Following is a using case:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
target_tensor's shape: [2, 6, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A Tensor with dtype float64, float32, int32.
A tensor with rank in [1, 6].
target_tensor (Variable): A Tensor with dtype float64, float32, int32.
target_tensor for expanding Input(X). Only target_tensor's shape is used.
Returns:
Variable: A Tensor with dtype float64, float32, int32.
After expanding, the size of each dimension of Output(Out) is equal to the size
of the corresponding dimension of target_tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
target_tensor = fluid.layers.data(
name="target_tensor", shape=[-1,20], dtype='float64')
result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,10)
y = np.random.rand(3,20)
output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
print(output[0].shape)
#(3,20)
"""
if in_dygraph_mode():
return core.ops.expand_as(x, target_tensor)
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
check_variable_and_dtype(target_tensor, 'target_tensor',
['float32', 'float64', 'int32', 'int64', 'bool'],
'expand_as')
helper = LayerHelper('expand_as', input=x, **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
inputs = {'X': x, 'target_tensor': target_tensor}
helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
return out
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@deprecated(since='1.8.0', update_to="paddle.uniform")
@templatedoc()
def uniform_random_batch_size_like(input,
shape,
dtype='float32',
input_dim_idx=0,
output_dim_idx=0,
min=-1.0,
max=1.0,
seed=0):
"""
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max). The input_dim_idx is used to get the input dimension value, which will be used to resize the output dimension.
.. code-block:: text
*Case 1:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 0,
input_dim_idx = 0,
result.shape[0] = input.shape[0],
then:
result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4]
*Case 2:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
input_dim_idx=1
output_dim_idx=1
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 1,
input_dim_idx = 1,
result.shape[1] = input.shape[1],
then:
result=[[-0.23133647, -0.84195036, 0.21441269],
[-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3]
Args:
input (Variable): A Tensor. Supported data types: float32, float64.
shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system. Note that if seed is not 0, this operator will always generate the same random numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
Returns:
Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
input = fluid.data(name="input", shape=[1, 3], dtype='float32')
out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
# example 2:
out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
"""
check_variable_and_dtype(input, 'Input', ("float32", 'float64'),
'uniform_random_batch_size_like')
check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
check_dtype(dtype, 'dtype', ('float32', 'float64'),
'uniform_random_batch_size_like')
helper = LayerHelper('uniform_random_batch_size_like', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='uniform_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'min': min,
'max': max,
'seed': seed,
'dtype': c_dtype
})
return out
@deprecated(since="2.0.0", update_to="paddle.normal")
@templatedoc()
def gaussian_random(shape,
mean=0.0,
std=1.0,
seed=0,
dtype='float32',
name=None):
"""
This OP returns a Tensor filled with random values sampled from a Gaussian
distribution, with ``shape`` and ``dtype``.
Args:
shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
is a list or tuple, the elements of it should be integers or Tensors
(with the shape [1], and the data type int32 or int64). If ``shape``
is a Tensor, it should be a 1-D Tensor(with the data type int32 or
int64).
mean(float|int, optional): Mean of the output tensor, default is 0.0.
std(float|int, optional): Standard deviation of the output tensor, default
is 1.0.
seed(int, optional): ${seed_comment}
dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
the output Tensor. Supported data types: float32, float64.
Default is float32.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with random values sampled from a Gaussian
distribution, with ``shape`` and ``dtype``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain Tensor.
result_1 = fluid.layers.gaussian_random(shape=[3, 4])
# [[-0.31261674, 1.8736548, -0.6274357, 0.96988016],
# [-0.12294637, 0.9554768, 1.5690808, -1.2894802 ],
# [-0.60082096, -0.61138713, 1.5345167, -0.21834975]]
# example 2:
# attr shape is a list which contains Tensor.
dim_1 = fluid.layers.fill_constant([1], "int64", 2)
dim_2 = fluid.layers.fill_constant([1], "int32", 3)
result_2 = fluid.layers.gaussian_random(shape=[dim_1, dim_2])
# [[ 0.51398206, -0.3389769, 0.23597084],
# [ 1.0388143, -1.2015356, -1.0499583 ]]
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.gaussian_random(var_shape)
# if var_shape's value is [2, 3]
# result_3 is:
# [[-0.12310527, 0.8187662, 1.923219 ]
# [ 0.70721835, 0.5210541, -0.03214082]]
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
exe.run(start)
x_np, = exe.run(main, feed={}, fetch_list=[x])
x_np
# array([[2.3060477, 2.676496 , 3.9911983],
# [0.9990833, 2.8675377, 2.2279181]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
x_np = x.numpy()
x_np
# array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ],
# [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape)
return core.ops.gaussian_random('shape', shape, 'mean',
float(mean), 'std',
float(std), 'seed', seed, 'dtype',
dtype)
check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn')
inputs = {}
attrs = {
'mean': mean,
'std': std,
'seed': seed,
'dtype': dtype,
'use_mkldnn': False
}
utils.get_shape_tensor_inputs(
inputs=inputs,
attrs=attrs,
shape=shape,
op_type='gaussian_random/randn')
helper = LayerHelper('gaussian_random', **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='gaussian_random',
inputs=inputs,
outputs={'Out': out},
attrs=attrs)
return out
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
"""
    This op samples an id from the multinomial distribution defined by each row of the input, drawing one id per sample.
Parameters:
x (Variable): 2-D tensor, [batch_size, input_feature_dimensions]
        min (float): minimum, default 0.0.
        max (float): maximum, default 1.0.
        seed (int): Random seed, default 0. If seed is not 0, the operator generates the same numbers every time.
        dtype (np.dtype|core.VarDesc.VarType|str): The data type of the output: float32, float16, int, etc.
Returns:
Variable: sampling tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name="X",
shape=[13, 11],
dtype='float32')
out = fluid.layers.sampling_id(x)
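            # A minimal usage sketch (assuming each row of x holds normalized
            # class probabilities, e.g. a softmax output): one id is drawn per
            # row, so one sample is produced for each of the 13 rows above.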
"""
helper = LayerHelper('sampling_id', **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='sampling_id',
inputs={'X': x},
outputs={'Out': out},
attrs={'min': min,
'max': max,
'seed': seed})
return out
@deprecated(since='1.8.0', update_to="paddle.normal")
@templatedoc()
def gaussian_random_batch_size_like(input,
shape,
input_dim_idx=0,
output_dim_idx=0,
mean=0.0,
std=1.0,
seed=0,
dtype='float32'):
"""
${comment}
Args:
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (int): ${input_dim_idx_comment}
output_dim_idx (int): ${output_dim_idx_comment}
mean (float): ${mean_comment}
std (float): ${std_comment}
seed (int): ${seed_comment}
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, float32 or float64.
Returns:
out (Variable): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[13, 11], dtype='float32')
out = fluid.layers.gaussian_random_batch_size_like(
input, shape=[-1, 11], mean=1.0, std=2.0)
"""
helper = LayerHelper('gaussian_random_batch_size_like', **locals())
check_type(input, 'input', (Variable),
'fluid.layers.gaussian_random_batch_size_like')
check_type(shape, 'shape', (list, tuple),
'fluid.layers.gaussian_random_batch_size_like')
check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
'fluid.layers.gaussian_random_batch_size_like')
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='gaussian_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'mean': mean,
'std': std,
'seed': seed,
'dtype': c_dtype
})
return out
@templatedoc()
def sum(x):
"""
${comment}
Case 1:
::
Input:
Input. Shape = [2, 3]
Input = [[1, 2, 3],
[4, 5, 6]]
Output:
The output. Shape = [2, 3]
Output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
::
Input:
First input:
Input1. Shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
Input2. Shape = [2, 3]
Input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
The output. Shape = [2, 3]
Output = [[8, 10, 12],
[14, 16, 18]]
Args:
x (Variable|list(Variable)): ${x_comment}
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
sum = fluid.layers.sum([input0, input1])
# You can print out 'sum' via executor.
out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program())
# The printed result is:
# 1570701754 the sum of input0 and input1: The place is:CPUPlace
# Tensor[sum_0.tmp_0]
# shape: [2,3,]
# dtype: l
# data: 8,8,8,8,8,8,
# the sum of input0 and input1 is 2-D Tensor with shape [2,3].
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They both represent 64-bit integer variables.
"""
return paddle.add_n(x)
@templatedoc()
def slice(input, axes, starts, ends):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position).
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0.
Then:
result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args:
input (Tensor): A ``Tensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to .
starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``starts`` is a Tensor, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``ends`` is a Tensor, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
Returns:
Tensor: A ``Tensor``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Tensor.
TypeError: The type of ``ends`` must be list, tuple or Tensor.
Examples:
.. code-block:: python
import paddle
input = paddle.rand(shape=[4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            # sliced_1 is input[1:3, 0:2, 2:4].
# example 2:
# attr starts is a list which contain tensor.
minus_3 = paddle.full([1], -3, "int32")
sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
            # sliced_2 is input[1:3, 0:2, 2:4].
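            # example 3 (illustrative sketch): values passed in ``ends`` that are
            # larger than the dimension size are clamped, so a big constant can
            # be used to slice to the end of a dimension.
            sliced_3 = paddle.slice(input, axes=[2], starts=[2], ends=[1000000])
            # sliced_3 is input[:, :, 2:6].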
"""
if in_dygraph_mode():
attrs = ()
starts_tensor = None
ends_tensor = None
infer_flags = list(1 for i in range(len(axes)))
if isinstance(starts, (list, tuple)):
starts = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in starts
]
attrs += ('starts', starts)
elif isinstance(starts, Variable):
starts_tensor = starts
starts.stop_gradient = True
infer_flags = list(-1 for i in range(len(axes)))
if isinstance(ends, (list, tuple)):
ends = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in ends
]
attrs += ('ends', ends)
elif isinstance(ends, Variable):
ends_tensor = ends
ends_tensor.stop_gradient = True
infer_flags = list(-1 for i in range(len(axes)))
return core.ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
'infer_flags', infer_flags, *attrs)
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple.")
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple.")
helper = LayerHelper('slice', **locals())
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
@deprecated(since='2.0.0', update_to="paddle.strided_slice")
def strided_slice(input, axes, starts, ends, strides):
"""
:alias_main: paddle.strided_slice
:alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice
:old_api: paddle.fluid.layers.strided_slice
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
    axis :math:`i-1` (here 0 is the initial position). The ``strides`` represents steps of
slicing and if the ``strides`` is negative, slice operation is in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` , ``ends`` and ``strides``.
Following examples will explain how strided_slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
            It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``strides`` is a Variable, it should be a 1-D Tensor.
It represents slice step of corresponding axis in ``axes``.
Returns:
Variable: A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
TypeError: The type of ``strides`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input = fluid.data(
name="input", shape=[3, 4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor Variable.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
            # sliced_1 is input[0:3:1, 0:2:1, 2:4:1, :].
# example 2:
# attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
            # sliced_2 is input[0:3:1, 0:2:1, 2:4:2, :].
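            # example 3 (illustrative sketch): a negative stride slices in reverse order.
            sliced_3 = fluid.layers.strided_slice(input, axes=[1], starts=[3], ends=[0], strides=[-1])
            # sliced_3 is input[:, 3:0:-1, :, :].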
"""
helper = LayerHelper('strided_slice', **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'strided_slice')
check_type(axes, 'axes', (list, tuple), 'strided_slice')
check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')
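    # Verify that any Tensor passed for axes/starts/ends/strides (either as the
    # whole argument or as a list element) carries the int32 dtype.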
def check_list_elements_dtype(list_input, input_name):
if isinstance(list_input, Variable):
check_dtype(list_input.dtype, input_name, ['int32'],
'strided_slice')
else:
for i, var in enumerate(list_input):
var_name = input_name + '[' + str(i) + ']'
if isinstance(var, Variable):
check_dtype(var.dtype, var_name, ['int32'], 'strided_slice')
check_list_elements_dtype(axes, 'axes')
check_list_elements_dtype(starts, 'starts')
check_list_elements_dtype(ends, 'ends')
check_list_elements_dtype(strides, 'strides')
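    # Convert a list mixing Python ints and Tensors into a list of 1-D int32
    # Tensors so it can be fed to the op as a TensorList input.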
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
inputs = {'Input': input}
attrs = {
'axes': axes,
'starts': starts,
'ends': ends,
'strides': strides,
'infer_flags': infer_flags
}
else:
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# strides
if isinstance(strides, Variable):
strides.stop_gradient = True
inputs['StridesTensor'] = strides
elif isinstance(strides, (list, tuple)):
attrs['strides'] = []
if utils._contain_var(strides):
inputs['StridesTensorList'] = get_new_list_tensor(strides)
for i, dim in enumerate(strides):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = strides
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def shape(input):
"""
:alias_main: paddle.shape
:alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
:old_api: paddle.fluid.layers.shape
**Shape Layer**
Get the shape of the input.
.. code-block:: text
Case1:
Given N-D Tensor:
input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
Then:
input.shape = [2, 4]
Case2:
Given SelectedRows:
input.rows = [0, 4, 19]
input.height = 20
input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor
Then:
input.shape = [3, 2]
Args:
input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
            If the input variable is of type SelectedRows, returns the shape of its inner tensor.
Returns:
Variable (Tensor): The shape of the input variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.ones((3, 100, 100)).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([ 3, 100, 100], dtype=int32)]
"""
check_variable_and_dtype(
input, 'input',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'shape')
helper = LayerHelper('shape', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='shape', inputs={'Input': input}, outputs={'Out': out})
return out
def rank(input):
"""
The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.
Args:
input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
Returns:
        Tensor: The 0-D int32 tensor holding the number of dimensions of the input Tensor.
Examples:
.. code-block:: python
import paddle
input = paddle.rand((3, 100, 100))
rank = paddle.rank(input)
print(rank)
# 3
"""
check_type(input, 'input', (Variable), 'input')
ndims = len(input.shape)
out = assign(np.array(ndims, 'int32'))
return out
@deprecated(since="2.0.0", update_to="paddle.numel")
def size(input):
"""
**Size Layer**
    Returns the number of elements for a tensor, which is an int64 Tensor with shape [1].
Args:
input (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
Returns:
Tensor: The number of elements for the input Tensor.
Raises:
TypeError: ``input`` must be a Tensor and the data type of ``input`` must be one of bool, float16, float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
input = layers.data(
name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
rank = layers.size(input) # 300
"""
if in_dygraph_mode():
return core.ops.size(input)
check_variable_and_dtype(
input, 'input',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")
helper = LayerHelper('size', **locals())
out = helper.create_variable_for_type_inference(dtype='int64')
helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out})
return out
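# Shared helper for the elementwise_* layers below: it validates both operands,
# appends the requested elementwise operator with the given broadcasting axis,
# and applies the optional activation to the result.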
def _elementwise_op(helper):
op_type = helper.layer_type
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
Scale operator.
    Apply scale and bias to the input Tensor as follows:
``bias_after_scale`` is True:
.. math::
Out=scale*X+bias
``bias_after_scale`` is False:
.. math::
Out=scale*(X+bias)
Args:
x(Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
scale(float|Tensor): The scale factor of the input, it should be a float number or a Tensor with shape [1] and data type as float32.
bias(float): The bias to be put on the input.
bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: Output tensor of scale operator, with shape and data type same as input.
Examples:
.. code-block:: python
# scale as a float32 number
import paddle
data = paddle.randn(shape=[2,3], dtype='float32')
res = paddle.scale(data, scale=2.0, bias=1.0)
.. code-block:: python
# scale with parameter scale as a Tensor
import paddle
data = paddle.randn(shape=[2, 3], dtype='float32')
factor = paddle.to_tensor([2], dtype='float32')
res = paddle.scale(data, scale=factor, bias=1.0)
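        .. code-block:: python
            # an illustrative sketch of bias_after_scale=False: the bias is
            # added before the scaling, i.e. Out = scale * (X + bias)
            import paddle
            data = paddle.to_tensor([1.0, 2.0, 3.0])
            res = paddle.scale(data, scale=2.0, bias=1.0, bias_after_scale=False)
            # res = [4., 6., 8.]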
"""
if in_dygraph_mode():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
out = core.ops.scale(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
return dygraph_utils._append_activation_in_dygraph(out)
check_variable_and_dtype(x, "x", [
'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32',
'int64', 'uint8'
], "scale")
inputs = {'X': [x]}
attrs = {
'bias': float(bias),
'bias_after_scale': bias_after_scale,
}
if isinstance(scale, Variable):
inputs['ScaleTensor'] = [scale]
else:
attrs['scale'] = float(scale)
helper = LayerHelper('scale', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return helper.append_activation(out)
def elementwise_add(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_add(x, y)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [3., 8., 6.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=1)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=3)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x,
y,
axis=axis,
act=act,
op_name='elementwise_add',
use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
@deprecated(since="2.0.0", update_to="paddle.divide")
def elementwise_div(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_div(x, y)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 0.6, 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=1)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=3)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_div')
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
def elementwise_sub(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_sub(x, y)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [1., -2., 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=1)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=3)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub')
return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
@deprecated(since="2.0.0", update_to="paddle.multiply")
def elementwise_mul(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_mul(x, y)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 15., 8.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=1)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=3)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mul')
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
def elementwise_max(x, y, axis=-1, act=None, name=None):
"""
:alias_main: paddle.elementwise_max
:alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max
:old_api: paddle.fluid.layers.elementwise_max
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_max(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 5, 4]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_max(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_max')
return _elementwise_op(LayerHelper('elementwise_max', **locals()))
def elementwise_min(x, y, axis=-1, act=None, name=None):
"""
:alias_main: paddle.elementwise_min
:alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min
:old_api: paddle.fluid.layers.elementwise_min
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_min(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 2]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_min(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_min')
return _elementwise_op(LayerHelper('elementwise_min', **locals()))
def elementwise_pow(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_pow(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 243, 16]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_pow')
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
@deprecated(since="2.0.0", update_to="paddle.remainder")
def elementwise_mod(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 6, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_mod(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 3]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mod')
return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
@deprecated(since="2.0.0", update_to="paddle.floor_divide")
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 7, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_floordiv(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[3, 2, 1]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_floordiv')
return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
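# Prepend each elementwise op's C++ proto doc string (plus the shared
# axis/act/name argument descriptions) to its Python doc string, and move any
# deprecation warning line to the top so it is shown first.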
for func in [
elementwise_add,
elementwise_div,
elementwise_sub,
elementwise_mul,
elementwise_max,
elementwise_pow,
elementwise_min,
elementwise_mod,
elementwise_floordiv,
]:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
# insert the c++ doc string on top of python doc string
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"axis (int32, optional): If X.dimension != Y.dimension, \
Y.dimension must be a subsequence of x.dimension. \
And axis is the start dimension index for broadcasting Y onto X. ",
"act (string, optional): Activation applied to the output. \
Default is None. Details: :ref:`api_guide_activations_en` ",
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
],
skip_attrs_set={
"x_data_format", "y_data_format", "axis", "use_quantizer",
"mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
}) + """\n""" + str(func.__doc__)
doc_list = func.__doc__.splitlines()
for idx, val in enumerate(doc_list):
if val.startswith("Warning: ") and val.endswith(
" instead."
) and "and will be removed in future versions." in val:
doc_list.insert(0, doc_list.pop(idx))
func.__doc__ = "\n" + "\n".join(i for i in doc_list)
break
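# NOTE: the list below is empty, so the following loop is currently a no-op; if
# ops were listed, it would append broadcasting usage examples to their doc strings.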
for func in []:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"act (basestring|None): Activation applied to the output.",
"name (basestring|None): Name of the output."
])
func.__doc__ = func.__doc__ + """
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
z0 = fluid.layers.%s(x0, y0)
# example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
z1 = fluid.layers.%s(x1, y1)
# example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
z2 = fluid.layers.%s(x2, y2, axis=2)
# example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
z3 = fluid.layers.%s(x3, y3, axis=1)
# example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
z4 = fluid.layers.%s(x4, y4, axis=0)
# example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32')
z5 = fluid.layers.%s(x5, y5, axis=0)
""" % (func.__name__, func.__name__, func.__name__, func.__name__,
func.__name__, func.__name__)
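# Shared helper for the logical_* layers below: it dispatches to the core op in
# dygraph mode, and otherwise validates the boolean operands and appends the
# unary or binary logical operator to the program.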
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if in_dygraph_mode():
op = getattr(core.ops, op_name)
if binary_op:
return op(x, y)
else:
return op(x)
check_variable_and_dtype(x, "x", ["bool"], op_name)
if y is not None:
check_variable_and_dtype(y, "y", ["bool"], op_name)
if out is not None:
check_type(out, "out", Variable, op_name)
helper = LayerHelper(op_name, **locals())
if binary_op:
assert x.dtype == y.dtype
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x,
"Y": y}, outputs={"Out": out})
else:
helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
return out
def logical_and(x, y, out=None, name=None):
r"""
``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by
.. math::
out = x \&\& y
.. note::
        ``paddle.logical_and`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
x (Tensor): the input tensor, it's data type should be bool.
y (Tensor): the input tensor, it's data type should be bool.
out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([True])
y = paddle.to_tensor([True, False, True, False])
res = paddle.logical_and(x, y)
print(res) # [True False True False]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
def logical_or(x, y, out=None, name=None):
"""
``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by
.. math::
out = x || y
.. note::
        ``paddle.logical_or`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
x (Tensor): the input tensor, it's data type should be bool.
y (Tensor): the input tensor, it's data type should be bool.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples:
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
res = paddle.logical_or(x, y)
print(res) # [[ True True] [ True False]]
"""
return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
def logical_xor(x, y, out=None, name=None):
r"""
``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by
.. math::
out = (x || y) \&\& !(x \&\& y)
.. note::
        ``paddle.logical_xor`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
x (Tensor): the input tensor, it's data type should be bool.
y (Tensor): the input tensor, it's data type should be bool.
out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples:
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
res = paddle.logical_xor(x, y)
print(res) # [[False, True], [ True, False]]
"""
return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_not(x, out=None, name=None):
"""
    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by
.. math::
out = !x
Args:
x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: ${out_comment}
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([True, False, True, False])
res = paddle.logical_not(x)
print(res) # [False True False True]
"""
return _logical_op(
op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
@templatedoc()
def clip(x, min, max, name=None):
"""
:old_api: paddle.fluid.layers.clip
${comment}
Args:
x(${x_type}): ${x_comment}
min(float): ${min_comment}
max(float): ${max_comment}
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[1], dtype='float32')
reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
"""
helper = LayerHelper("clip", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip",
inputs={"X": x},
attrs={"min": min,
"max": max},
outputs={"Out": out})
return out
@templatedoc()
def clip_by_norm(x, max_norm, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
max_norm(${max_norm_type}): ${max_norm_comment}
name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set name,
            and it is None by default.
Returns:
Tensor:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
input = paddle.to_tensor([[2.0, 2.0], [2.0, 2.0]], dtype='float32')
reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
# [[0.5, 0.5], [0.5, 0.5]]
"""
if in_dygraph_mode():
return core.ops.clip_by_norm(x, 'max_norm', max_norm)
helper = LayerHelper("clip_by_norm", **locals())
check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip_by_norm",
inputs={"X": x},
attrs={"max_norm": max_norm},
outputs={"Out": out})
return out
@deprecated(since="2.0.0", update_to="paddle.mean")
@templatedoc()
def mean(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
name='data', shape=[2, 3], dtype='float32')
mean = fluid.layers.mean(input)
"""
if in_dygraph_mode():
return core.ops.mean(x)
helper = LayerHelper("mean", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
return out
@templatedoc()
def merge_selected_rows(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
var = b.create_var(
name="X", dtype="float32", persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
y = fluid.layers.merge_selected_rows(var)
"""
helper = LayerHelper("merge_selected_rows", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="merge_selected_rows",
inputs={"X": x},
attrs={},
outputs={"Out": out})
return out
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
"""
Mul Operator.
This operator is used to perform matrix multiplication for input $x$ and $y$.
The equation is:
.. math::
Out = x * y
Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $x$.
Args:
x (Variable): The first input Tensor/LoDTensor of mul_op.
y (Variable): The second input Tensor/LoDTensor of mul_op.
        x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional matrix first. The flattening rule is: the first `x_num_col_dims` dimensions will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - x_num_col_dims` dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions' sizes, and width of the flattened matrix is equal to the product of $x$'s last `rank(x) - x_num_col_dims` dimensions' size. For example, suppose $x$ is a 5-dimensional tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details. Default is 1.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
output = fluid.layers.mul(dataX, dataY,
x_num_col_dims = 1,
y_num_col_dims = 1)
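            # an illustrative sketch of flattening a higher-dimensional input:
            # x with shape [2, 3, 4] is flattened to [2*3, 4] when x_num_col_dims=2,
            # multiplied by y with shape [4, 5], and reshaped back to [2, 3, 5].
            x3d = fluid.layers.data(name="x3d", append_batch_size=False, shape=[2, 3, 4], dtype="float32")
            w = fluid.layers.data(name="w", append_batch_size=False, shape=[4, 5], dtype="float32")
            out3d = fluid.layers.mul(x3d, w, x_num_col_dims=2, y_num_col_dims=1)
            # out3d.shape == [2, 3, 5]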
"""
if in_dygraph_mode():
return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
'y_num_col_dims', y_num_col_dims)
inputs = {"X": [x], "Y": [y]}
attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
helper = LayerHelper("mul", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mul", inputs={"X": x,
"Y": y}, attrs=attrs, outputs={"Out": out})
return out
@deprecated(since="2.0.0", update_to="paddle.nn.functional.maxout")
@templatedoc()
def maxout(x, groups, name=None, axis=1):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
groups(int): ${groups_comment}
axis(int, optional): ${axis_comment}
name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set name,
            and it is None by default.
Returns:
Variable: ${out_comment}
Raises:
ValueError: If `axis` is not 1, -1 or 3.
ValueError: If the number of input channels can not be divisible by `groups`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input = fluid.data(
name='data',
shape=[None, 256, 32, 32],
dtype='float32')
out = fluid.layers.maxout(input, groups=2)
"""
return paddle.nn.functional.maxout(**locals())
def space_to_depth(x, blocksize, name=None):
r"""
    Given a blocksize, this op rearranges the input LoDTensor with layout [batch, channel, height, width]
    by moving blocks of spatial data into depth. More specifically, this op outputs a copy of \
    the input LoDTensor where values from the height and width dimensions are moved to the channel \
    dimension.
The attr blocksize indicates the input block size.
space_to_depth will reorganize the elements of input with shape[batch, channel, height, width] \
according to blocksize to construct output with shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:
    - Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location.
- The Y, X coordinates within each block of the input become the high order component of the output channel index
- channel should be divisible by square of blocksize
    - height, width should be divisible by blocksize
This OP is useful for resizing the activations between convolutions \
(but keeping all data)
.. code-block:: text
Given the input x with the shape [1, 1, 4, 4]:
x.data = [[[[1, 2, 5, 6],
[3, 4, 7, 8],
[9, 10, 13, 14],
[11, 12, 15, 16]]]]
blocksize = 2
then get the output with the shape [1, 4, 2, 2]:
out.data = [[[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[13, 14], [15, 16]]]]
Args:
        x (Variable): The input, which should be a 4-D Tensor or LoDTensor, with the shape \
[batch, channel, height, width]
        blocksize (int): The block size used to rearrange each feature map; it should be >= 2
name(str, optional): For detailed information, please refer \
            to :ref:`api_guide_Name`. Usually there is no need to set name, \
            and it is None by default.
    Returns: The output, which should be a 4-D Tensor or LoDTensor, with the shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]
Return Type: Variable
Raises:
        ValueError: If blocksize is not a Python int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
            import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(
name='data', shape=[1, 4, 2, 2], dtype='float32')
space_to_depthed = fluid.layers.space_to_depth(
x=data, blocksize=2)
exe = fluid.Executor(fluid.CPUPlace())
data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')
print(data_np)
#array([[[[ 0., 1.], [ 2., 3.]],
# [[ 4., 5.], [ 6., 7.]],
# [[ 8., 9.], [10., 11.]],
# [[12., 13.], [14., 15.]]]], dtype=float32)
out_main = exe.run(fluid.default_main_program(),
feed={'data': data_np},
fetch_list=[space_to_depthed])
print(out_main)
#[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
# [[ 8.]], [[12.]], [[ 9.]], [[13.]],
# [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
# [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]
"""
helper = LayerHelper("space_to_depth", **locals())
    if not isinstance(blocksize, int):
        raise ValueError("blocksize must be a Python int")
check_variable_and_dtype(x, 'x', \
['float16', 'float32', 'float64', 'int32', 'int64'], 'space_to_depth')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
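    # Append the space_to_depth op: it rearranges the [N, C, H, W] input into
    # [N, C * blocksize^2, H / blocksize, W / blocksize], as described above.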
helper.append_op(
type="space_to_depth",
inputs={"X": x},
attrs={"blocksize": blocksize},
outputs={"Out": out})
return out
def affine_channel(x,
scale=None,
bias=None,
data_layout='NCHW',
name=None,
act=None):
"""
Applies a separate affine transformation to each channel of the input.
Useful for replacing spatial batch norm with its equivalent fixed
    transformation. The input can also be a 2-D tensor, in which case the affine
    transformation is applied in the second dimension.
Args:
x (Variable): Feature map input can be a 4D tensor with order NCHW
or NHWC. It also can be a 2D tensor and the affine transformation
is applied in the second dimension.The data type is float32 or float64.
scale (Variable): 1D input of shape (C), the c-th element is the scale
factor of the affine transformation for the c-th channel of
the input.The data type is float32 or float64.
bias (Variable): 1D input of shape (C), the c-th element is the bias
of the affine transformation for the c-th channel of the input.
The data type is float32 or float64.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
data_layout.
name (str, default None): The name of this layer. For more information,
please refer to :ref:`api_guide_Name` .
act (str, default None): Activation to be applied to the output of this layer.
Returns:
Variable: A tensor which has the same shape, data layout and data type with x.
Examples:
.. code-block:: python
import numpy as np
            import paddle.fluid as fluid
import paddle
paddle.enable_static()
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
default_initializer=fluid.initializer.Constant(2.0))
input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
default_initializer=fluid.initializer.Constant(0.5))
out = fluid.layers.affine_channel(data,scale=input_scale,
bias=input_bias)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_array] = exe.run(test_program,
fetch_list=out,
feed={'data': np.ones([1,1,2,2]).astype('float32')})
# out_array is [[[[2.5, 2.5],
# [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
"""
helper = LayerHelper("affine_channel", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel')
check_type(scale, 'scale', (Variable, type(None)), 'affine_channel')
check_type(bias, 'bias', (Variable, type(None)), 'affine_channel')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
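    # The affine_channel op multiplies each channel of x by the corresponding
    # element of `scale` and adds the corresponding element of `bias`.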
helper.append_op(
type="affine_channel",
inputs={"X": x,
'Scale': scale,
'Bias': bias},
attrs={"data_layout": data_layout},
outputs={"Out": out})
return helper.append_activation(out)
def similarity_focus(input, axis, indexes, name=None):
r"""
SimilarityFocus Operator
Generate a similarity focus mask with the same shape of input using the following method:
1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
to the axis according to the indexes. For example, if axis=1 and indexes=[a],
it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
    2. For each index, find the largest numbers in the tensor T, so that each
       row and each column has at most one number (that is, once the largest
       number has been found in the i-th row and the j-th column, the remaining
       numbers in the i-th row and j-th column are skipped, and the next largest
       number is selected from the remaining numbers; obviously there will be
       min(B, C) such numbers), and mark the corresponding position of the
3-D similarity focus mask as 1, otherwise as 0. Do elementwise-or for
each index.
3. Broadcast the 3-D similarity focus mask to the same shape of input X.
Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_
.. code-block:: text
* Example :
Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
the number of channels and the shape of feature map is (A, B):
x.shape = (2, 3, 2, 2)
x.data = [[[[0.8, 0.1],
[0.4, 0.5]],
[[0.9, 0.7],
[0.9, 0.9]],
[[0.8, 0.9],
[0.1, 0.2]]],
[[[0.2, 0.5],
[0.3, 0.4]],
[[0.9, 0.7],
[0.8, 0.4]],
[[0.0, 0.2],
[0.4, 0.7]]]]
Given axis: 1 (the axis of the channel)
Given indexes: [0]
then we get a 4-D tensor out with the same shape of input x:
out.shape = (2, 3, 2, 2)
out.data = [[[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]]],
[[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]]]]
Args:
input(Variable): The input tensor variable(default float). It should
be a 4-D tensor with shape [BatchSize, A, B, C]. Data type is
float32 or float64.
axis(int): Indicating the dimension to be selected. It can only be
1, 2 or 3.
indexes(list): Indicating the indexes of the selected dimension.
Returns:
Variable: A tensor variable with the same shape and same type \
as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(
name='data', shape=[-1, 3, 2, 2], dtype='float32')
fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
"""
helper = LayerHelper('similarity_focus', **locals())
# check attrs
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
"similarity_focus")
check_type(axis, 'axis', int, "similarity_focus")
check_type(indexes, 'indexes', list, "similarity_focus")
if axis != 1 and axis != 2 and axis != 3:
raise ValueError("axis must be 1, 2 or 3.")
if len(indexes) == 0:
raise ValueError("indexes can not be empty.")
out = helper.create_variable_for_type_inference(dtype=input.dtype)
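    # Append the similarity_focus op, which builds the 0/1 focus mask described
    # in the docstring along `axis` for the given `indexes`.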
helper.append_op(
type='similarity_focus',
inputs={'X': input},
outputs={'Out': out},
attrs={"axis": axis,
"indexes": indexes})
return out
def hash(input, hash_size, num_hash=1, name=None):
"""
    This OP hashes the input to an integer less than hash_size.
The hash algorithm we used was xxHash - Extremely fast hash algorithm
(https://github.com/Cyan4973/xxHash/tree/v0.6.5)
Args:
        input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
             **Only support LoDTensor**.
        hash_size(int): The space size of the hash algorithm. The output index
             will be an integer in the range [0, hash_size).
        num_hash(int, optional): The number of times to hash, default is 1.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A LoDTensor with the same data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
place = fluid.core.CPUPlace()
x = fluid.data(name="x", shape=[2,2], dtype="int32", lod_level=1)
res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
in1 = np.array([[1,2],[3,4]]).astype("int32")
print(in1)
x_i = fluid.create_lod_tensor(in1, [[0, 2]], place)
res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
print(np.array(res[0]))
# [[[722]
# [407]
# [337]
# [395]]
# [[603]
# [590]
# [386]
# [901]]]
"""
check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
check_type(hash_size, 'hash_size', int, 'hash')
check_type(num_hash, 'num_hash', int, 'hash')
helper = LayerHelper('hash', **locals())
out = helper.create_variable_for_type_inference(
helper.input_dtype(), stop_gradient=True)
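    # The hash op applies xxHash `num_hash` times to each input row and takes the
    # result modulo `hash_size` (the 'mod_by' attribute), so every output index
    # lies in [0, hash_size).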
helper.append_op(
type='hash',
inputs={'X': input},
outputs={'Out': out},
attrs={'num_hash': num_hash,
'mod_by': hash_size})
return out
@templatedoc()
def grid_sampler(x, grid, name=None):
"""
This operation samples input X by using bilinear interpolation based on
flow field grid, which is usually generated by :code:`affine_grid` . The grid of
shape [N, H, W, 2] is the concatenation of (x, y) coordinates
with shape [N, H, W] each, where x is indexing the 4th dimension
(in width dimension) of input data x and y is indexing the 3rd
dimension (in height dimension), finally results is the bilinear
interpolation value of 4 nearest corner points. The output tensor
shape will be [N, C, H, W].
.. code-block:: text
Step 1:
Get (x, y) grid coordinates and scale to [0, H-1/W-1].
.. code-block:: text
grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)
Step 2:
Indices input data X with grid (x, y) in each [H, W] area, and bilinear
interpolate point value by 4 nearest points.
wn ------- y_n ------- en
| | |
| d_n |
| | |
x_w --d_w-- grid--d_e-- x_e
| | |
| d_s |
| | |
          ws ------- y_s ------- es
x_w = floor(x) // west side x coord
x_e = x_w + 1 // east side x coord
y_n = floor(y) // north side y coord
        y_s = y_n + 1               // south side y coord
d_w = grid_x - x_w // distance to west side
d_e = x_e - grid_x // distance to east side
d_n = grid_y - y_n // distance to north side
d_s = y_s - grid_y // distance to south side
wn = X[:, :, y_n, x_w] // north-west point value
en = X[:, :, y_n, x_e] // north-east point value
        ws = X[:, :, y_s, x_w]      // south-west point value
        es = X[:, :, y_s, x_e]      // south-east point value
output = wn * d_e * d_s + en * d_w * d_s
+ ws * d_e * d_n + es * d_w * d_n
Args:
x(Variable): The input tensor, which is a 4-D tensor with shape
[N, C, H, W], N is the batch size, C is the channel
                     number, H and W are the feature height and width.
The data type is float32 or float64.
grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: Output of shape [N, C, H, W] data samples input X
                  using bilinear interpolation based on input grid.
The data type is same as input tensor.
Examples:
.. code-block:: python
            import paddle.fluid as fluid
import paddle
paddle.enable_static()
# use with affine_grid
x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
out = fluid.layers.grid_sampler(x=x, grid=grid)
"""
helper = LayerHelper("grid_sampler", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
'grid_sampler')
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")
    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x, 'Grid': grid}
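    # The grid_sampler op bilinearly interpolates x at the (x, y) locations
    # given by `grid`, producing an output of shape [N, C, H, W].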
helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
return out
def log_loss(input, label, epsilon=1e-4, name=None):
r"""
**Negative Log Loss Layer**
This layer accepts input predictions and target label and returns the
negative log loss.
.. math::
Out = -label * \\log{(input + \\epsilon)}
- (1 - label) * \\log{(1 - input + \\epsilon)}
Args:
input (Tensor|list): A 2-D tensor with shape [N x 1], where N is the
batch size. This input is a probability computed
by the previous operator. Data type float32.
label (Tensor|list): The ground truth which is a 2-D tensor with
shape [N x 1], where N is the batch size.
Data type float32.
epsilon (float, optional): A small number for numerical stability. Default 1e-4.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Tensor, which shape is [N x 1], data type is float32.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
label = paddle.randn((10,1))
prob = paddle.randn((10,1))
cost = F.log_loss(input=prob, label=label)
"""
helper = LayerHelper('log_loss', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')
loss = helper.create_variable_for_type_inference(dtype=input.dtype)
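    # epsilon is added inside the logs to keep them finite when the predicted
    # probability is exactly 0 or 1 (see the formula in the docstring).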
helper.append_op(
type='log_loss',
inputs={'Predicted': [input],
'Labels': [label]},
outputs={'Loss': [loss]},
attrs={'epsilon': epsilon})
return loss
def add_position_encoding(input, alpha, beta, name=None):
r"""
This operator performs weighted sum of input feature at each position
(position in the sequence) and the corresponding position encoding.
For more details of position encoding, please refer to `Attention Is All You
Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .
The formula is as follows:
.. math::
PE(pos, 2i) &= \\sin{(pos / 10000^{2i / P})} \\\\
PE(pos, 2i + 1) &= \\cos{(pos / 10000^{2i / P})} \\\\
Out(:, pos, i) &= \\alpha * input(:, pos, i) + \\beta * PE(pos, i)
Where:
- :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
- :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`
Args:
input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
Tensor, the shape should be `[N, M, P]`, where `N` stands for
batch size, `M` for sequence length, `P` for the size of feature
dimension. If it is a LoDTensor, the shape should be `[N, P]`,
where `N` stands for the total sequence lengths in this mini-batch,
`P` for the size of feature. The data type should be float32 or float64.
alpha(float): Indicate the weight coefficient for `input` when performing
weighted sum.
beta(float): Indicate the weight coefficient for position encoding when
performing weighted sum.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.
Examples:
.. code-block:: python
import paddle
tensor = paddle.randn([16, 32, 64])
position_tensor = paddle.fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
"""
if in_dygraph_mode():
return core.ops.add_position_encoding(input, "alpha", alpha, "beta",
beta)
helper = LayerHelper('add_position_encoding', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
"add_position_encoding")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype=dtype)
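    # The op computes the sinusoidal position encoding PE and outputs
    # alpha * input + beta * PE, as given by the formula in the docstring.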
helper.append_op(
type="add_position_encoding",
inputs={"X": input},
outputs={"Out": out},
attrs={"alpha": alpha,
"beta": beta})
return out
def bilinear_tensor_product(x,
y,
size,
act=None,
name=None,
param_attr=None,
bias_attr=None):
r"""
:api_attr: Static Graph
**Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N].
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
    - :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Args:
x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr|None): To specify the bias parameter attribute.
Default: None, which means the default bias parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
Returns:
Variable: A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
layer1 = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
layer2 = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
tensor = paddle.static.nn.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
"""
helper = LayerHelper('bilinear_tensor_product', **locals())
dtype = helper.input_dtype('x')
param_shape = [size, x.shape[1], y.shape[1]]
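    # One [M, N] weight matrix per output dimension, so the full weight tensor
    # has shape [size, M, N].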
w = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
out = helper.create_variable_for_type_inference(dtype=dtype)
inputs = {"X": x, "Y": y, "Weight": w}
if helper.bias_attr:
bias_size = [1, size]
bias = helper.create_parameter(
attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
inputs["Bias"] = bias
helper.append_op(
type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})
# add activation
return helper.append_activation(out)
@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
"""
This operator gets tensor data from input with SelectedRows type, and outputs a LoDTensor.
.. code-block:: text
input x is SelectedRows:
x.rows = [0, 5, 5, 4, 19]
x.height = 20
           x.value = [[1, 1], [2, 2], [2, 2], [3, 3], [6, 6]]
        Output is LoDTensor:
out.shape = [5, 2]
out.data = [[1, 1],
[2, 2],
[2, 2],
[3, 3],
[6, 6]]
Args:
x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor transformed from SelectedRows. The data type is same with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
out = fluid.layers.get_tensor_from_selected_rows(input)
"""
check_type(x, 'x', Variable, 'get_tensor_from_selected_rows')
if x.type != core.VarDesc.VarType.SELECTED_ROWS:
raise TypeError(
"The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS."
)
helper = LayerHelper('get_tensor_from_selected_rows', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
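    # Copy the rows stored in the SelectedRows value into a dense LoDTensor.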
helper.append_op(
type='get_tensor_from_selected_rows',
inputs={'X': x},
outputs={'Out': out},
attrs={})
return out
def shuffle_channel(x, group, name=None):
"""
This operator shuffles the channels of input x.
    It divides the input channels into :attr:`group` subgroups,
    and obtains a new channel order by selecting elements from every subgroup one by one.
Please refer to the paper
https://arxiv.org/pdf/1707.01083.pdf
.. code-block:: text
Given a 4-D tensor input with the shape (N, C, H, W):
input.shape = (1, 4, 2, 2)
input.data =[[[[0.1, 0.2],
[0.2, 0.3]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Given group: 2
        then we get a 4-D tensor out with the same shape as the input:
out.shape = (1, 4, 2, 2)
out.data = [[[[0.1, 0.2],
[0.2, 0.3]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Args:
x(Variable): The input tensor variable. It should be a 4-D tensor with shape [N, C, H, W]
        group(int): Indicating the number of subgroups. It should divide the number of channels.
Returns:
out(Variable): the channels shuffling result is a tensor variable with the
same shape and same type as the input.
Raises:
        TypeError: If group is not an int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.shuffle_channel(x=input, group=2)
"""
helper = LayerHelper("shuffle_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(group, int):
raise TypeError("group must be int type")
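    # The shuffle_channel op regroups the C channels into `group` subgroups and
    # interleaves them, as illustrated in the docstring.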
helper.append_op(
type="shuffle_channel",
inputs={"X": x},
outputs={"Out": out},
attrs={"group": group})
return out
@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
"""
**Temporal Shift Operator**
${comment}
Args:
x(Tensor): ${x_comment}
seg_num(int): ${seg_num_comment}
shift_ratio(float): ${shift_ratio_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format(str, optional): Data format that specifies the layout of input.
It can be "NCHW" or "NHWC". Default: "NCHW".
Returns:
out(Tensor): The temporal shifting result is a tensor with the
same shape and same data type as the input.
Raises:
TypeError: seg_num must be int type.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
input = paddle.randn([6, 4, 2, 2])
out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
"""
if data_format not in ["NCHW", "NHWC"]:
raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. "
"Received Attr(data_format): {}.".format(data_format))
if in_dygraph_mode():
return core.ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
shift_ratio, 'data_format', data_format)
helper = LayerHelper("temporal_shift", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
check_type(seg_num, 'seg_num', int, 'temporal_shift')
check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(seg_num, int):
raise TypeError("seg_num must be int type.")
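    # Roughly, the temporal_shift op shifts a `shift_ratio` fraction of the
    # channels along the segment (temporal) dimension of size `seg_num`.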
helper.append_op(
type="temporal_shift",
inputs={"X": x},
outputs={"Out": out},
attrs={
"seg_num": seg_num,
"shift_ratio": shift_ratio,
"data_format": data_format
})
return out
class PyFuncRegistry(object):
_register_funcs = []
def __init__(self, func):
if func is None or not callable(func):
raise TypeError('func must be a Python function')
self._func = func
# find named args using reflection
args = inspect.getargspec(self._func)
if len(args[0]) == 0 and args[1] is None and args[2] is None:
# Function with no inputs
self._named_args = None
else:
self._named_args = args[0]
self._id = core._append_python_callable_object_and_return_id(self)
'''
Why record self here?
1. For debug usage. Users can call
:code:`py_func.registered_func(idx)` method
to find the registered function corresponding
to :code:`idx`.
        2. For increasing the reference count of self.
           It seems that releasing a Python object
           whose reference count is 1 would cause a
           segmentation fault error on the C++ side.
           Maybe this is due to the lack of Python GC on the C++ side?
'''
PyFuncRegistry._register_funcs.append(self)
@classmethod
def registered_func(cls, idx):
return cls._register_funcs[idx]._func
@classmethod
def registered_func_num(cls):
return len(cls._register_funcs)
@property
def id(self):
return self._id
def __call__(self, *args):
if self._named_args is None:
func_ret = self._func()
else:
kwargs = dict()
idx = 0
for arg in self._named_args:
kwargs[arg] = args[idx]
idx += 1
func_ret = self._func(*args[idx:], **kwargs)
if not isinstance(func_ret, (list, tuple)):
func_ret = (func_ret, )
ret = []
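        # Convert every return value to a LoDTensor on CPU so the C++ side can
        # consume it; None and LoDTensor results pass through unchanged.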
for each_ret in func_ret:
if each_ret is None or isinstance(each_ret, core.LoDTensor):
ret.append(each_ret)
continue
if not isinstance(each_ret, np.ndarray):
each_ret = np.array(each_ret)
tensor = core.LoDTensor()
tensor.set(each_ret, core.CPUPlace())
ret.append(tensor)
return tuple(ret)
@static_only
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
:api_attr: Static Graph
    This OP is used to register a customized Python OP to Paddle. The design
    principle of py_func is that Tensor and numpy array can be converted to each
    other easily, so you can use Python and numpy API to register a Python OP.
The forward function of the registered OP is ``func`` and the backward function
of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
call ``backward_func`` at backward runtime(if ``backward_func`` is not None).
``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
the output of ``func``, whose type can be either Tensor or numpy array.
    The input of the backward function ``backward_func`` is ``x``, ``out`` and
    the gradient of ``out``. If ``out`` has no gradient, the relevant input of
    ``backward_func`` is None. If ``x`` does not have a gradient, the user should
    return None in ``backward_func``.
The data type and shape of ``out`` should also be set correctly before this
API is called, and the data type and shape of the gradient of ``out`` and
``x`` will be inferred automatically.
This API can also be used to debug the neural network by setting the ``func``
as a function that only print variables.
Args:
func (callable): The forward function of the registered OP. When the network
is running, the forward output ``out`` will be calculated according to this
function and the forward input ``x``. In ``func`` , it's suggested that we
actively convert Tensor into a numpy array, so that we can use Python and
numpy API arbitrarily. If not, some operations of numpy may not be compatible.
x (Tensor|tuple(Tensor)|list[Tensor]): The input of the forward function ``func``.
It can be Tensor|tuple(Tensor)|list[Tensor]. In addition, Multiple Tensor
should be passed in the form of tuple(Tensor) or list[Tensor].
out (T|tuple(T)|list[T]): The output of the forward function ``func``, it can be
T|tuple(T)|list[T], where T can be either Tensor or numpy array. Since Paddle
cannot automatically infer the shape and type of ``out``, you must create
``out`` in advance.
backward_func (callable, optional): The backward function of the registered OP.
Its default value is None, which means there is no reverse calculation. If
it is not None, ``backward_func`` is called to calculate the gradient of
``x`` when the network is at backward runtime.
skip_vars_in_backward_input (Tensor, optional): It's used to limit the input
list of ``backward_func``, and it can be Tensor|tuple(Tensor)|list[Tensor].
It must belong to either ``x`` or ``out``. The default value is None, which means
that no tensors need to be removed from ``x`` and ``out``. If it is not None,
these tensors will not be the input of ``backward_func``. This parameter is only
useful when ``backward_func`` is not None.
Returns:
Tensor|tuple(Tensor)|list[Tensor]: The output ``out`` of the forward function ``func``.
Examples:
.. code-block:: python
# example 1:
import paddle
import six
import numpy as np
paddle.enable_static()
# Creates a forward function, Tensor can be input directly without
# being converted into numpy array.
def tanh(x):
return np.tanh(x)
# Skip x in backward function and return the gradient of x
# Tensor must be actively converted to numpy array, otherwise,
# operations such as +/- can't be used.
def tanh_grad(y, dy):
return np.array(dy) * (1 - np.square(np.array(y)))
# Creates a forward function for debugging running networks(print value)
def debug_func(x):
print(x)
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in six.moves.range(4):
hidden = paddle.static.nn.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = paddle.static.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input Tensor
paddle.static.py_func(func=debug_func, x=hidden, out=None)
prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
ce_loss = paddle.nn.loss.CrossEntropyLoss()
return ce_loss(prediction, label)
x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
y = paddle.static.data(name='y', shape=[1,10], dtype='int64')
res = simple_net(x, y)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
input1 = np.random.random(size=[1,4]).astype('float32')
input2 = np.random.randint(1, 10, size=[1,10], dtype='int64')
out = exe.run(paddle.static.default_main_program(),
feed={'x':input1, 'y':input2},
fetch_list=[res.name])
print(out)
.. code-block:: python
# example 2:
# This example shows how to turn Tensor into numpy array and
# use numpy API to register an Python OP
import paddle
import numpy as np
paddle.enable_static()
def element_wise_add(x, y):
# Tensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
x = np.array(x)
y = np.array(y)
if x.shape != y.shape:
raise AssertionError("the shape of inputs must be the same!")
result = np.zeros(x.shape, dtype='int32')
for i in range(len(x)):
for j in range(len(x[0])):
result[i][j] = x[i][j] + y[i][j]
return result
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = paddle.static.default_startup_program()
main_program = paddle.static.default_main_program()
# Input of the forward function
x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
            output = create_tmp_var('output', 'int32', [2, 3])
            # Multiple Variable should be passed in the form of tuple(Variable) or list[Variable]
paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
out = exe.run(main_program,
feed={'x':input1, 'y':input2},
fetch_list=[output.name])
print("{0} + {1} = {2}".format(input1, input2, out))
py_func_demo()
# Reference output:
# [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13]
# [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)]
"""
helper = LayerHelper('py_func', **locals())
check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
if x is None:
x = []
elif isinstance(x, Variable):
x = [x]
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
elif isinstance(out, Variable):
out_list = [out]
elif isinstance(out, tuple):
out_list = list(out)
elif isinstance(out, list):
out_list = out
else:
raise TypeError(
'Output must be Variable/list(Variable)/tuple(Variable)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = PyFuncRegistry(
backward_func).id if backward_func is not None else -1
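    # The ids above identify the registered Python callables; they are passed to
    # the C++ py_func op, which calls back into Python at run time.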
for each_out in out_list:
if len(each_out.shape) == 0:
raise ValueError(
'Output shapes of py_func op should be provided by users manually'
)
backward_skip_vars = set()
if backward_func is not None and skip_vars_in_backward_input is not None:
if isinstance(skip_vars_in_backward_input, Variable):
skip_vars_in_backward_input = [skip_vars_in_backward_input]
fwd_in_out = [v.name for v in x]
fwd_in_out.extend([v.name for v in out_list])
fwd_in_out = set(fwd_in_out)
backward_skip_vars = set()
for v in skip_vars_in_backward_input:
if not v.name in fwd_in_out:
raise ValueError(
'Variable {} is not found in forward inputs and outputs'
.format(v.name))
backward_skip_vars.add(v.name)
helper.append_op(
type='py_func',
inputs={'X': x},
outputs={'Out': out_list},
attrs={
'forward_callable_id': fwd_func_id,
'backward_callable_id': bwd_func_id,
'backward_skip_vars': list(backward_skip_vars)
})
return out
# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
@templatedoc()
def psroi_pool(input,
rois,
output_channels,
spatial_scale,
pooled_height,
pooled_width,
name=None):
"""
${comment}
Parameters:
input (Variable): ${x_comment}
        rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates. The data type is the same as `input`
output_channels (int): ${output_channels_comment}
spatial_scale (float): ${spatial_scale_comment} Default: 1.0
pooled_height (int): ${pooled_height_comment} Default: 1
pooled_width (int): ${pooled_width_comment} Default: 1
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
"""
helper = LayerHelper('psroi_pool', **locals())
# check attrs
if not isinstance(output_channels, int):
raise TypeError("output_channels must be int type")
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
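    # Append the psroi_pool op: position-sensitive ROI pooling over the input
    # feature map for each ROI.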
helper.append_op(
type='psroi_pool',
inputs={'X': input,
'ROIs': rois},
outputs={'Out': out},
attrs={
'output_channels': output_channels,
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
@templatedoc()
def prroi_pool(input,
rois,
spatial_scale=1.0,
pooled_height=1,
pooled_width=1,
batch_roi_nums=None,
name=None):
"""
The precise roi pooling implementation for paddle. Reference: https://arxiv.org/pdf/1807.11590.pdf
Args:
        input (Variable): The input of precise roi pooling. The shape of the input tensor is
                        [N, C, H, W], where N is the batch size, C is the number of input channels,
                        H is the height of the feature, and W is the width of the feature.
        rois (Variable): ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor or Tensor of shape (num_rois, 4), the lod level
is 1 when it is LoDTensor. The LoD include the rois's batch index
information. If rois is Tensor, its batch index information should
be provided by batch_index.
Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width).
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
pooled_height (integer): The pooled output height. Default: 1.
pooled_width (integer): The pooled output width. Default: 1.
batch_roi_nums (Variable): The number of roi for each image in batch. It
should be 1-D Tensor, with shape [N] and dtype int64,
                                  where N is the batch size. Default: None. Note: the lod of the
                                  input should be empty when batch_roi_nums has values.
name (str, default None): The name of this operation.
Returns:
        Variable(Tensor): The shape of the returned Tensor is (N, C, pooled_height, pooled_width), with value type float32, float16. N and C denote the batch_size and channels of the input respectively.
Examples:
.. code-block:: python
## prroi_pool without batch_roi_num
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)
## prroi_pool with batch_roi_num
batchsize=4
x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
"""
check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool')
check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool')
helper = LayerHelper('prroi_pool', **locals())
# check attrs
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
inputs_op = {'X': input, 'ROIs': rois}
if batch_roi_nums is not None:
inputs_op['BatchRoINums'] = batch_roi_nums
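    # BatchRoINums is only attached when rois is a dense Tensor whose batch
    # index information is not carried by LoD (see the docstring note above).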
helper.append_op(
type='prroi_pool',
inputs=inputs_op,
outputs={'Out': out},
attrs={
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
def pixel_shuffle(x, upscale_factor):
"""
This op rearranges elements in a tensor of shape [N, C, H, W]
to a tensor of shape [N, C/r**2, H*r, W*r].
This is useful for implementing efficient sub-pixel convolution
with a stride of 1/r.
Please refer to the paper: `Real-Time Single Image and Video Super-Resolution
Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
    by Shi et al. (2016) for more details.
Parameters:
x(Variable): 4-D tensor, the data type should be float32 or float64.
upscale_factor(int): factor to increase spatial resolution.
Returns:
Out(Variable): Reshaped tensor according to the new dimension.
Raises:
ValueError: If the square of upscale_factor cannot divide the channels of input.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[2,9,4,4])
output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,9,4,4).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
# print(output.shape)
# (2L, 1L, 12L, 12L)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
helper = LayerHelper("pixel_shuffle", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(upscale_factor, int):
raise TypeError("upscale factor must be int type")
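    # The pixel_shuffle op moves data from the channel dimension into spatial
    # blocks of size upscale_factor x upscale_factor.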
helper.append_op(
type="pixel_shuffle",
inputs={"X": x},
outputs={"Out": out},
attrs={"upscale_factor": upscale_factor})
return out
def fsp_matrix(x, y):
"""
**FSP matrix op**
This op is used to calculate the flow of solution procedure (FSP) matrix of two 4-D Tensor feature maps.
Given feature map x with shape [x_channel, h, w] and feature map y with shape
[y_channel, h, w], we can get the fsp matrix of x and y in two steps:
1. reshape x into matrix with shape [x_channel, h * w] and reshape and
transpose y into matrix with shape [h * w, y_channel].
2. multiply x and y to get fsp matrix with shape [x_channel, y_channel].
The output is a batch of fsp matrices.
Args:
x (Variable): A 4-D Tensor feature map with shape [batch_size, x_channel, height, width].
A Tensor with type float32, float64.
y (Variable): A 4-D Tensor feature map with shape [batch_size, y_channel, height, width].
                      The y_channel can be different from the x_channel of Input(X)
                      while the other dimensions must be the same as Input(X)'s. A Tensor with
type float32, float64.
Returns:
fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel].
The x_channel is the channel of x and the y_channel is the channel of y. A Tensor with
type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32])
feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
filter_size=3)
feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
filter_size=1)
loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
helper = LayerHelper('fsp_matrix', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
input_param_name='x'))
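    # The fsp op flattens the spatial dims of x and y and multiplies them,
    # producing one [x_channel, y_channel] matrix per sample.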
helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
def continuous_value_model(input, cvm, use_cvm=True):
r"""
**continuous_value_model layers**
    Now, this OP is used in the CTR project to remove or transform the show and click values in :attr:`input`.
:attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
Show and click at first two dims of embedding vector D.
If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
:attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .
Args:
input (Variable): The input variable. A 2-D LoDTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
A Tensor with type float32, float64.
cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
A Tensor with type float32, float64.
use_cvm (bool): Use show_click or not. if use, the output dim is the same as input.
if not use, the output dim is `input dim - 2` (remove show and click)
Returns:
Variable: A 2-D LodTensor with shape :math:`[N, M]` . if :attr:`use_cvm` = True, M is equal to input dim D. if False, M is equal to `D - 2`. \
A Tensor with same type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[64, 1], dtype="int64")
label = fluid.data(name="label", shape=[64, 1], dtype="int64")
embed = fluid.layers.embedding(
input=input,
size=[100, 11],
dtype='float32')
ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'cvm')
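    # With use_cvm=True the op keeps the (log-transformed) show/click columns;
    # with use_cvm=False the first two columns are removed from the output.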
helper.append_op(
type='cvm',
inputs={'X': [input],
'CVM': [cvm]},
outputs={'Y': [out]},
attrs={"use_cvm": use_cvm})
return out
def where(condition):
"""
    Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `condition`.
Args:
condition(Variable): A bool tensor with rank at least 1, the data type is bool.
Returns:
        Variable, the output data type is int64. : The tensor variable storing a 2-D tensor, which contains all the coordinates of the true elements.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# condition is a tensor [True, False, True]
condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0], [2]]
# condition is a tensor [[True, False], [False, True]]
condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0, 0], [1, 1]]
# condition is a tensor [False, False, False]
condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[]]
"""
helper = LayerHelper("where_index", **locals())
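    # In dynamic graph mode, call the C++ where_index op directly; otherwise
    # append it to the static program below.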
if in_dygraph_mode():
return core.ops.where_index(condition)
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(
type='where_index',
inputs={'Condition': condition},
outputs={'Out': [out]})
return out
@deprecated(since="2.0.0", update_to="paddle.sign")
def sign(x):
r"""
    This OP returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Variable|numpy.ndarray): The input variable could be N-D tensor or N-D numpy array, \
the input data type is float32 or float64.
Returns:
Variable, the output data type is the same as input data type. : The output sign tensor with identical shape to input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# [1.0, 0.0, -1.0]
data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
"""
helper = LayerHelper("sign", **locals())
check_type(x, 'x', (Variable, np.ndarray), 'sign')
if isinstance(x, np.ndarray):
x = assign(x)
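        # numpy inputs are materialized as a Variable via assign() before the op is built.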
check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def unique(x, dtype='int32'):
r"""
Return a unique tensor for `x` and an index tensor pointing to this unique tensor.
Args:
x(Tensor): A 1-D input tensor, it's data type should be float32, float64, int32, int64.
dtype(np.dtype|str, optional): The type of index tensor: int32, int64. Default: int32.
Returns:
tuple: (out, index). `out` is the unique tensor for `x`, with identical dtype to `x`, and \
`index` is an index tensor pointing to `out`, by which user can recover the original `x` tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique")
helper = LayerHelper("unique", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
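    # `out` keeps the dtype of x, while `index` uses the user-specified integer
    # dtype (int32 by default).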
helper.append_op(
type='unique',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index]})
return out, index
def unique_with_counts(x, dtype='int32'):
r"""
    This OP returns a unique tensor for `x` , a count tensor recording how many times each unique element occurs in the raw input, \
        and an index tensor pointing to this unique tensor.
**NOTICE**: This op support the variable type of Tensor only.
Args:
x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
        dtype(np.dtype|core.VarDesc.VarType|str): The type of the count and index tensors, it could be int32 or int64. Default value is int32.
Returns:
tuple, the variable type in tuple is Tensor, the output :attr:`out` data type is the same as input :attr:`x`, \
and data type of output :attr:`index` and :attr:`count` will be int32 or int64.: The :attr:`out` is unique tensor for input :attr:`x`,\
the data shape is :math:`[K]`, the `K` may be different to the `N` in shape of :attr:`x`. :attr:`index` is an index tensor pointing\
to :attr:`out`, the data shape is :math:`[N]` , the data shape is the same as input :attr:`x`. :attr:`count` is count of unique element in\
the :attr:`x`, the data shape is :math:`[K]`, the data shape is the same as output :attr:`out`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
# count is [1, 3, 1, 1]
# x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique_with_counts")
if not (dtype == 'int32' or dtype == 'int64'):
raise TypeError(
"Op unique_with_counts, index dtype must be int32 or int64")
if x is None or len(x.shape) != 1:
raise ValueError(
"Op unique_with_counts, x must not be null and size of dim must be 1"
)
helper = LayerHelper("unique_with_counts", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
count = helper.create_variable_for_type_inference(dtype)
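    # `index` maps every element of x back into `out`, and `count` records how
    # many times each unique value occurs.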
helper.append_op(
type='unique_with_counts',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index],
'Count': [count]})
return out, index, count
def deformable_conv(input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None):
r"""
:api_attr: Static Graph
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Variable): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
        mask (Variable, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filter. It is as same as the output
image channel.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be divisible by this value or smaller
than this value; if you face out of memory problem, you can try
to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \
used while True. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle.fluid as fluid
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'deformable_conv')
check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
'deformable_conv')
check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, Variable):
raise TypeError("Input of deformable_conv must be Variable")
if not isinstance(offset, Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
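    # Filter weights are initialized from Normal(0, sqrt(2 / filter_elem_num))
    # by default, matching the formula in the param_attr documentation above.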
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
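# Illustrative sketch (an addition to this document, not part of the original API):
# for a deformable conv with kernel (kH, kW), the offset input documented above is
# expected to carry 2 * deformable_groups * kH * kW channels and the mask input
# deformable_groups * kH * kW channels (spatial size matching the conv output,
# which equals the input size in the stride-1, padding-1 examples). The helper
# name and arguments are assumptions made for this example.
def _deformable_conv_aux_shapes(batch, out_h, out_w, filter_size, deformable_groups=1):
    k_h, k_w = (filter_size, filter_size) if isinstance(filter_size, int) else filter_size
    offset_shape = [batch, 2 * deformable_groups * k_h * k_w, out_h, out_w]
    mask_shape = [batch, deformable_groups * k_h * k_w, out_h, out_w]
    return offset_shape, mask_shape
# _deformable_conv_aux_shapes(1, 32, 32, 3) -> ([1, 18, 32, 32], [1, 9, 32, 32]),
# matching the offset/mask shapes used in the docstring examples above.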
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
r"""
    This op returns a col buffer of sliding local blocks of input x, also known
    as im2col for batched 2D image tensors. For each block under the convolution filter,
    all elements will be rearranged into a column. As the convolution filter slides over
    the input feature map, a series of such columns is formed.
For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
can be calculated as following.
.. math::
dkernel[0] &= dilations[0] \\times (kernel\_sizes[0] - 1) + 1
dkernel[1] &= dilations[1] \\times (kernel\_sizes[1] - 1) + 1
hout &= \\frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1
wout &= \\frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1
Cout &= C \\times kernel\_sizes[0] \\times kernel\_sizes[1]
Lout &= hout \\times wout
Parameters:
x(Tensor): 4-D Tensor, input tensor of format [N, C, H, W],
data type can be float32 or float64
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
            or an integer stride treated as [stride, stride].
For default, strides will be [1, 1].
paddings(int|list): The paddings of each dimension, should be
[padding_top, padding_left, padding_bottom, padding_right]
or [padding_h, padding_w] or an integer padding.
            If [padding_h, padding_w] is given, it will be expanded to
[padding_h, padding_w, padding_h, padding_w]. If an integer
padding was given, [padding, padding, padding, padding] will
be used. For default, paddings will be [0, 0, 0, 0]
dilations(int|list): the dilations of convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
[dilation, dilation]. For default, it will be [1, 1].
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The tensor corresponding to the sliding local blocks.
        The output shape is [N, Cout, Lout] as described above.
Cout is the total number of values within each block,
and Lout is the total number of such blocks.
The data type of output is the same as the input :math:`x`
Return Type:
Tensor
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.randn((100,3,224,224))
y = F.unfold(x, [3, 3], 1, 1, 1)
"""
helper = LayerHelper("unfold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
assert len(x.shape) == 4, \
"input should be the format of [N, C, H, W]"
if isinstance(kernel_sizes, int):
kernel_sizes = [kernel_sizes, kernel_sizes]
else:
assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
"kernel_sizes should either be an integer or a list of two integers"
if isinstance(strides, int):
strides = [strides, strides]
else:
assert isinstance(strides, list) and (len(strides) == 2), \
"strides should either be an integer or a list of two integers"
if isinstance(dilations, int):
dilations = [dilations, dilations]
else:
assert isinstance(dilations, list) and (len(dilations) == 2), \
"dilations should either be an integer or a list of two integers"
if isinstance(paddings, int):
paddings = [paddings] * 4
elif isinstance(paddings, list):
if len(paddings) == 2:
paddings = paddings * 2
elif len(paddings) == 4:
pass
else:
raise ValueError(
"paddings should either be an integer or a list of 2 or 4 integers"
)
else:
raise ValueError(
"Unexpected type of paddings, it should be either an integer or a list"
"of 2 or 4 integers")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="unfold",
inputs={"X": x},
outputs={"Y": out},
attrs={
"kernel_sizes": kernel_sizes,
"strides": strides,
"paddings": paddings,
"dilations": dilations
})
return out
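# Illustrative sketch (an addition, not part of the original module): the output
# size formulas from the unfold docstring, evaluated with plain Python. The
# function name is an assumption made for this example.
def _unfold_output_shape(C, H, W, kernel_sizes, strides=(1, 1), paddings=(0, 0, 0, 0), dilations=(1, 1)):
    k_h, k_w = kernel_sizes
    dkernel_h = dilations[0] * (k_h - 1) + 1
    dkernel_w = dilations[1] * (k_w - 1) + 1
    hout = (H + paddings[0] + paddings[2] - dkernel_h) // strides[0] + 1
    wout = (W + paddings[1] + paddings[3] - dkernel_w) // strides[1] + 1
    cout = C * k_h * k_w
    lout = hout * wout
    return cout, lout
# For the docstring example (x of shape [100, 3, 224, 224], 3x3 kernel, stride 1,
# padding 1, dilation 1): _unfold_output_shape(3, 224, 224, (3, 3), (1, 1), (1, 1, 1, 1))
# returns (27, 50176), so y has shape [100, 27, 50176].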
def deformable_roi_pooling(input,
rois,
trans,
no_trans=False,
spatial_scale=1.0,
group_size=[1, 1],
pooled_height=1,
pooled_width=1,
part_size=None,
sample_per_part=1,
trans_std=0.1,
position_sensitive=False,
name=None):
r"""
Deformable ROI Pooling Layer
Performs deformable region-of-interest pooling on inputs. As described
    in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it computes an offset for each bin after
    roi pooling so that pooling happens at the correct region. The batch size becomes the number of region bounding boxes after deformable_roi_pooling.
    The operation has three steps:
    1. Divide each region proposal into equal-sized sections with the pooled_width and pooled_height.
    2. Add an offset to each pixel in the ROI to get its new location, and compute the new value directly through
       bilinear interpolation with the four nearest pixels.
    3. Sample several points in each bin and average them as the output.
Args:
input (Variable):The input of deformable roi pooling and it is tensor which value type is float32. The shape of input is
[N, C, H, W]. Where N is batch size, C is number of input channels,
H is height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), and the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates, which value type is float32.
trans (Variable): Offset of features on ROIs while pooling which value type is float32. The format is [N, C, H, W], where
N is number of ROIs, C is number of channels, which indicate the offset distance
in the x and y directions, H is pooled height, and W is pooled width.
        no_trans (bool): Whether to skip adding the offset while roi pooling; the value type is bool (True or False).
            If True, no offset is added in the operation. Default: False.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), which value type is float32.
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
        group_size (list|tuple): The number of groups into which the input channels are divided, given as a list or tuple of int32 values. (e.g. the number of input channels
            is k1 * k2 * (C + 1), where k1 and k2 are the group width and height and C + 1 is the number of output
            channels.) e.g. (4, 6) means a group height of 4 and a group width of 6. Default: [1, 1].
        pooled_height (int): The pooled output height, with type int32. Default: 1.
        pooled_width (int): The pooled output width, with type int32. Default: 1.
        part_size (list|tuple): The height and width of the offset, given as a list or tuple of int32 values, e.g. (4, 6) means height 4 and width 6; the values always equal pooled_height \
            and pooled_width. Default: if None, [pooled_height, pooled_width] is used.
        sample_per_part (int): The number of samples in each bin, with type int32. A larger value costs more computation. Default: 1.
        trans_std (float): Coefficient applied to the offset, with type float32. It controls the weight of the offset. Default: 0.1.
        position_sensitive (bool): Whether to use the deformable psroi pooling mode. If False, the output channel count equals the input channel count. \
            If True, the input channel count should be output channels * pooled_height * pooled_width. Default: False.
name (str|None): Name of layer. Default: None.
Returns:
        Variable: The output of deformable roi pooling. If position_sensitive is False, the output channel count equals the input channel count. If position_sensitive is True,\
            the output channel count is the input channel count divided by pooled_height * pooled_width.
Examples:
.. code-block:: python
# position_sensitive=True
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=True)
# position_sensitive=False
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=False)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
'deformable_roi_pooling')
check_type(group_size, 'group_size', (list, tuple),
'deformable_roi_pooling')
if part_size is not None:
check_type(part_size, 'part_size', (list, tuple),
'deformable_roi_pooling')
input_channels = input.shape[1]
if position_sensitive == False:
output_channels = input_channels
else:
output_channels = input_channels / pooled_height / pooled_width
if part_size is None:
part_height = pooled_height
part_width = pooled_width
part_size = [part_height, part_width]
part_size = utils.convert_to_list(part_size, 2, 'part_size')
group_size = utils.convert_to_list(group_size, 2, 'group_size')
helper = LayerHelper('deformable_psroi_pooling', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
top_count = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="deformable_psroi_pooling",
inputs={"Input": input,
"ROIs": rois,
"Trans": trans},
outputs={"Output": output,
"TopCount": top_count},
attrs={
"no_trans": no_trans,
"spatial_scale": spatial_scale,
"output_dim": output_channels,
"group_size": group_size,
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"part_size": part_size,
"sample_per_part": sample_per_part,
"trans_std": trans_std
})
return output
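# Illustrative sketch (an addition, not part of the original API): how the output
# channel count is derived from position_sensitive, mirroring the computation in
# the function body above. The helper name is an assumption made for this example.
def _deformable_roi_pooling_out_channels(input_channels, pooled_height, pooled_width, position_sensitive):
    if not position_sensitive:
        return input_channels
    # position-sensitive pooling spreads the input channels across the pooled grid
    return input_channels // (pooled_height * pooled_width)
# With the docstring example (192 input channels, 8 x 8 pooling, position_sensitive=True)
# the pooled output has 192 // (8 * 8) = 3 channels.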
@deprecated(since="2.0.0", update_to="paddle.shard_index")
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
"""
Recompute the `input` indices according to the offset of the
shard. The length of the indices is evenly divided into N shards, and if
the `shard_id` matches the shard with the input index inside, the index is
    recomputed on the basis of the shard offset, otherwise it is set to
`ignore_value`. The detail is as follows:
::
shard_size = (index_num + nshards - 1) // nshards
y = x % shard_size if x // shard_size == shard_id else ignore_value
    NOTE: If the length of indices cannot be evenly divided by the shard number,
the size of the last shard will be less than the calculated `shard_size`
Args:
        input (Tensor): Input indices with data type int64. Its last dimension must be 1.
index_num (int): An integer defining the range of the index.
nshards (int): The number of shards.
shard_id (int): The index of the current shard.
ignore_value (int): An integer value out of sharded index range.
Returns:
Tensor: The sharded index of input.
Examples:
.. code-block:: python
import paddle
label = paddle.to_tensor([[16], [1]], "int64")
shard_label = paddle.shard_index(input=label,
index_num=20,
nshards=2,
shard_id=0)
print(shard_label)
# [[-1], [1]]
"""
check_variable_and_dtype(input, 'input', ['int64'], 'shard_index')
op_type = 'shard_index'
helper = LayerHelper(op_type, **locals())
if shard_id < 0 or shard_id >= nshards:
raise ValueError('The shard_id(%d) should be in [0, %d)' %
(shard_id, nshards))
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'X': [input]},
outputs={'Out': out},
attrs={
'index_num': index_num,
'nshards': nshards,
'shard_id': shard_id,
'ignore_value': ignore_value
},
stop_gradient=True)
return out
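# Illustrative sketch (an addition, not part of the original module): the
# per-element rule from the docstring, written out in plain Python so the
# example output can be reproduced by hand. The helper name is an assumption.
def _shard_index_one(x, index_num, nshards, shard_id, ignore_value=-1):
    shard_size = (index_num + nshards - 1) // nshards
    return x % shard_size if x // shard_size == shard_id else ignore_value
# With index_num=20, nshards=2, shard_id=0 (so shard_size=10):
# _shard_index_one(16, 20, 2, 0) -> -1 and _shard_index_one(1, 20, 2, 0) -> 1,
# matching the [[-1], [1]] output shown in the docstring example.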
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
r"""
This operator implements the hard_swish activation function.
Hard_swish is proposed in MobileNetV3, and performs better in computational stability and efficiency compared to swish function.
For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf
The formula is as follows:
.. math::
out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}
In the above equation:
    ``threshold`` and ``scale`` should be positive, while ``offset`` can be positive or negative. Using the default parameters is recommended.
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64.
threshold (float, optional): The threshold in Relu function. Default: 6.0
scale (float, optional): The scale factor. Default: 6.0
offset (float, optional): The offset factor. Default: 3.0
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
paddle.enable_static()
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.hard_swish(x)
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
            print(out) # [[0.66666667, 1.66666667, 3., 4.]]
"""
if in_dygraph_mode():
return core.ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
'offset', offset)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_swish')
helper = LayerHelper('hard_swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold,
'scale': scale,
'offset': offset})
return out
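# Illustrative sketch (an addition, not part of the original module): the scalar
# form of the hard_swish formula above, handy for checking the docstring output
# by hand. The helper name is an assumption made for this example.
def _hard_swish_scalar(x, threshold=6.0, scale=6.0, offset=3.0):
    return x * min(max(0.0, x + offset), threshold) / scale
# _hard_swish_scalar(1.0) -> 0.666..., _hard_swish_scalar(4.0) -> 4.0, matching
# the [[0.66666667, 1.66666667, 3., 4.]] output in the docstring example.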
@templatedoc()
def mish(x, threshold=20, name=None):
r"""
This operator implements the mish activation function.
Refer to `Mish: A Self Regularized Non-Monotonic Neural
Activation Function <https://arxiv.org/abs/1908.08681>`_
The formula is as follows if :attr:`threshold` is :code:`None` or negative:
.. math::
out = x * \\tanh(\\ln(1 + e^{x}))
The formula is as follows if :attr:`threshold` is set as positive value:
.. math::
out = \\begin{cases}
x \\ast \\tanh(x), \\text{if } x > \\text{threshold} \\\\
x \\ast \\tanh(e^{x}), \\text{if } x < -\\text{threshold} \\\\
x \\ast \\tanh(\\ln(1 + e^{x})), \\text{otherwise}
\\end{cases}
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type
should be float16, float32 or float64.
threshold (float|None): threshold for softplus in Mish operator.
            An approximate value of softplus is used when the absolute value
            of the input is greater than :attr:`threshold` and :attr:`threshold`
            is set to a positive value. For a None or negative threshold,
            the approximation is not used. Default 20.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.mish(x)
place = fluid.CPUPlace()
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
            print(out) # approximately [[0.8651, 1.944, 2.9865, 3.9974]]
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
check_type(threshold, 'threshold', (float, int), 'mish')
assert threshold > 0, "threshold of mish should be greater than 0, " \
"but got {}".format(threshold)
helper = LayerHelper('mish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='mish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold or -1})
return out
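# Illustrative sketch (an addition, not part of the original module): a scalar
# reference for the piecewise mish definition above, including the softplus
# approximation controlled by ``threshold``. Only the standard library is used;
# the helper name is an assumption made for this example.
def _mish_scalar(x, threshold=20.0):
    import math
    if threshold is None or threshold <= 0:
        softplus = math.log1p(math.exp(x))
    elif x > threshold:
        softplus = x                  # softplus(x) ~ x for large x
    elif x < -threshold:
        softplus = math.exp(x)        # softplus(x) ~ e^x for very negative x
    else:
        softplus = math.log1p(math.exp(x))
    return x * math.tanh(softplus)
# _mish_scalar(1.0) is roughly 0.8651 and _mish_scalar(4.0) roughly 3.9974.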
def gather_tree(ids, parents):
r"""
To be used after beam search. After beam search, we get selected ids at
each time step and the corresponding parents in the search tree. Both ids
and parents have the layout :attr:`[max_time, batch_size, beam_size]`. Then
:attr:`gather_tree` is used to backtrace from the last time step and
generate the full sequences by collecting selected ids.
Here is an example:
.. code-block:: text
Given:
ids = [[[2 2]
[6 1]]
[[3 9]
[6 1]]
[[0 1]
[9 0]]]
parents = [[[0 0]
[1 1]]
[[1 0]
[1 0]]
[[0 0]
[0 1]]]
Then:
gather_tree(ids, parents)
= [[[2 2]
[1 6]]
[[3 3]
[6 1]]
[[0 1]
[9 0]]]
Args:
ids(Tensor): A Tensor with shape :attr:`[length, batch_size, beam_size]`
and data type :attr:`int32` or :attr:`int64`. It contains the selected
ids of all time steps.
parents(Tensor): A Tensor with the same shape and data type as :attr:`ids`,
It contains the parents corresponding to selected ids when searching
among beams.
Returns:
A Tensor with the same shape and data type as :attr:`ids`. \
It contains the full sequences. The sequences are collected from \
:attr:`ids` by backtracing according to :attr:`parents`.
Examples:
.. code-block:: python
import paddle
ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
final_sequences = paddle.nn.functional.gather_tree(ids, parents)
# [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]
"""
if in_dygraph_mode():
return core.ops.gather_tree(ids, parents)
else:
helper = LayerHelper('gather_tree', **locals())
check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
check_variable_and_dtype(parents, 'parents', ['int32', 'int64'],
'gather_tree')
out = helper.create_variable_for_type_inference(dtype=ids.dtype)
helper.append_op(
type="gather_tree",
inputs={"Ids": ids,
"Parents": parents},
outputs={"Out": out})
return out
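# Illustrative sketch (an addition, not part of the original module): a plain
# Python backtrace that mirrors what gather_tree computes, using nested lists
# shaped [max_time, batch_size, beam_size]. The helper name is an assumption.
def _gather_tree_py(ids, parents):
    max_time = len(ids)
    batch_size = len(ids[0])
    beam_size = len(ids[0][0])
    out = [[[0] * beam_size for _ in range(batch_size)] for _ in range(max_time)]
    for b in range(batch_size):
        for beam in range(beam_size):
            k = beam
            for t in reversed(range(max_time)):
                out[t][b][beam] = ids[t][b][k]
                k = parents[t][b][k]
    return out
# Reproduces the text example above:
# _gather_tree_py([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]],
#                 [[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
# -> [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]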
@deprecated(since="2.0.0", update_to="paddle.uniform")
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
name=None):
"""
This OP returns a Tensor filled with random values sampled from a uniform
distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.
Examples:
::
Input:
shape = [1, 2]
Output:
result=[[0.8505902, 0.8397286]]
Args:
shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
is a list or tuple, the elements of it should be integers or Tensors
(with the shape [1], and the data type int32 or int64). If ``shape``
is a Tensor, it should be a 1-D Tensor(with the data type int32 or
int64).
dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
the output Tensor. Supported data types: float32, float64.
Default is float32.
min(float|int, optional): The lower bound on the range of random values
to generate, ``min`` is included in the range. Default is -1.0.
max(float|int, optional): The upper bound on the range of random values
to generate, ``max`` is excluded in the range. Default is 1.0.
seed(int, optional): Random seed used for generating samples. 0 means
use a seed generated by the system. Note that if seed is not 0,
this operator will always generate the same random numbers every
time. Default is 0.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with random values sampled from a uniform
distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.
Raises:
TypeError: If ``shape`` is not list, tuple, Tensor.
TypeError: If ``dtype`` is not float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain Tensor.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
# [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357],
# [-0.34646994, -0.45116323, -0.09902662, -0.11397249],
# [ 0.433519, 0.39483607, -0.8660099, 0.83664286]]
# example 2:
# attr shape is a list which contains Tensor.
dim_1 = fluid.layers.fill_constant([1], "int64", 2)
dim_2 = fluid.layers.fill_constant([1], "int32", 3)
result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])
# [[-0.9951253, 0.30757582, 0.9899647 ],
# [ 0.5864527, 0.6607096, -0.8886161 ]]
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.uniform_random(var_shape)
# if var_shape's value is [2, 3]
# result_3 is:
# [[-0.8517412, -0.4006908, 0.2551912 ],
# [ 0.3364414, 0.36278176, -0.16085452]]
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape)
return core.ops.uniform_random('shape', shape, 'min',
float(min), 'max',
float(max), 'seed', seed, 'dtype', dtype)
check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random/rand')
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand')
helper = LayerHelper("uniform_random", **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="uniform_random", inputs=inputs, attrs=attrs,
outputs={"Out": out})
utils.try_set_static_shape_tensor(out, shape)
return out
def unbind(input, axis=0):
"""
    Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
Args:
input (Variable): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.
axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind. If :math:`axis < 0`, the
dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
Returns:
list(Variable): The list of segmented Tensor variables.
Example:
.. code-block:: python
import paddle
# input is a variable which shape is [3, 4, 5]
input = paddle.fluid.data(
name="input", shape=[3, 4, 5], dtype="float32")
[x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
# x0.shape [4, 5]
# x1.shape [4, 5]
# x2.shape [4, 5]
[x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
# x0.shape [3, 5]
# x1.shape [3, 5]
# x2.shape [3, 5]
# x3.shape [3, 5]
"""
helper = LayerHelper("unbind", **locals())
check_type(input, 'input', (Variable), 'unbind')
dtype = helper.input_dtype()
check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
'unbind')
if not isinstance(axis, (int)):
raise TypeError("The type of 'axis' must be int, but received %s." %
(type(axis)))
if isinstance(axis, np.generic):
axis = np.asscalar(axis)
input_shape = input.shape
axis_ = axis if axis >= 0 else len(input_shape) + axis
num = input_shape[axis_]
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type="unbind",
inputs={"X": input},
outputs={"Out": outs},
attrs={"axis": axis})
return outs
| 39.677602
| 946
| 0.577992
|
2b7dc6fc7937f15df8462efc8889da9dc7fcfb71
| 6,335
|
py
|
Python
|
libs/openpyxl/comments/comment_sheet.py
|
rocketbot-cl/PivotTableExcel
|
041c8db2bbcd9655d30bf4c37aca4902b3d1db7a
|
[
"MIT"
] | null | null | null |
libs/openpyxl/comments/comment_sheet.py
|
rocketbot-cl/PivotTableExcel
|
041c8db2bbcd9655d30bf4c37aca4902b3d1db7a
|
[
"MIT"
] | 1
|
2021-02-08T20:31:28.000Z
|
2021-02-08T20:31:28.000Z
|
venv/Lib/site-packages/openpyxl/comments/comment_sheet.py
|
ansonsry/Freshshop
|
79ab8beb1aa993f6365182c8d3bb478ee4e028f8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
## Incomplete!
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Float,
Integer,
Set,
String,
Bool,
)
from openpyxl.descriptors.excel import Guid, ExtensionList
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.xml.functions import tostring
from openpyxl.cell.text import Text
from .author import AuthorList
from .comments import Comment
from .shape_writer import ShapeWriter
class ObjectAnchor(Serialisable):
moveWithCells = Bool(allow_none=True)
sizeWithCells = Bool(allow_none=True)
#z-order = Integer(allow_none=True) needs alias
#from
#to defs from xdr
def __init__(self,
moveWithCells=None,
sizeWithCells=None,
#z-order=None,
):
self.moveWithCells = moveWithCells
self.sizeWithCells = sizeWithCells
#self.z-order = z-order
class Properties(Serialisable):
locked = Bool(allow_none=True)
defaultSize = Bool(allow_none=True)
_print = Bool(allow_none=True)
disabled = Bool(allow_none=True)
uiObject = Bool(allow_none=True)
autoFill = Bool(allow_none=True)
autoLine = Bool(allow_none=True)
altText = String(allow_none=True)
textHAlign = Set(values=(['left', 'center', 'right', 'justify', 'distributed']))
textVAlign = Set(values=(['top', 'center', 'bottom', 'justify', 'distributed']))
lockText = Bool(allow_none=True)
justLastX = Bool(allow_none=True)
autoScale = Bool(allow_none=True)
rowHidden = Bool(allow_none=True)
colHidden = Bool(allow_none=True)
anchor = Typed(expected_type=ObjectAnchor, )
__elements__ = ('anchor',)
def __init__(self,
locked=None,
defaultSize=None,
_print=None,
disabled=None,
uiObject=None,
autoFill=None,
autoLine=None,
altText=None,
textHAlign=None,
textVAlign=None,
lockText=None,
justLastX=None,
autoScale=None,
rowHidden=None,
colHidden=None,
anchor=None,
):
self.locked = locked
self.defaultSize = defaultSize
self._print = _print
self.disabled = disabled
self.uiObject = uiObject
self.autoFill = autoFill
self.autoLine = autoLine
self.altText = altText
self.textHAlign = textHAlign
self.textVAlign = textVAlign
self.lockText = lockText
self.justLastX = justLastX
self.autoScale = autoScale
self.rowHidden = rowHidden
self.colHidden = colHidden
self.anchor = anchor
class CommentRecord(Serialisable):
tagname = "comment"
ref = String()
authorId = Integer()
guid = Guid(allow_none=True)
shapeId = Integer(allow_none=True)
text = Typed(expected_type=Text)
commentPr = Typed(expected_type=Properties, allow_none=True)
author = String(allow_none=True)
__elements__ = ('text', 'commentPr')
__attrs__ = ('ref', 'authorId', 'guid', 'shapeId')
def __init__(self,
ref="",
authorId=0,
guid=None,
shapeId=0,
text=None,
commentPr=None,
author=None,
height=79,
width=144
):
self.ref = ref
self.authorId = authorId
self.guid = guid
self.shapeId = shapeId
if text is None:
text = Text()
self.text = text
self.commentPr = commentPr
self.author = author
self.height = height
self.width = width
@classmethod
def from_cell(cls, cell):
"""
Class method to convert cell comment
"""
comment = cell._comment
ref = cell.coordinate
self = cls(ref=ref, author=comment.author)
self.text.t = comment.content
self.height = comment.height
self.width = comment.width
return self
@property
def content(self):
"""
Remove all inline formatting and stuff
"""
return self.text.content
class CommentSheet(Serialisable):
tagname = "comments"
authors = Typed(expected_type=AuthorList)
commentList = NestedSequence(expected_type=CommentRecord, count=0)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
_id = None
_path = "/xl/comments/comment{0}.xml"
mime_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml"
_rel_type = "comments"
_rel_id = None
__elements__ = ('authors', 'commentList')
def __init__(self,
authors=None,
commentList=None,
extLst=None,
):
self.authors = authors
self.commentList = commentList
def to_tree(self):
tree = super(CommentSheet, self).to_tree()
tree.set("xmlns", SHEET_MAIN_NS)
return tree
@property
def comments(self):
"""
Return a dictionary of comments keyed by coord
"""
authors = self.authors.author
for c in self.commentList:
yield c.ref, Comment(c.content, authors[c.authorId], c.height, c.width)
@classmethod
def from_comments(cls, comments):
"""
Create a comment sheet from a list of comments for a particular worksheet
"""
authors = IndexedList()
# dedupe authors and get indexes
for comment in comments:
comment.authorId = authors.add(comment.author)
return cls(authors=AuthorList(authors), commentList=comments)
def write_shapes(self, vml=None):
"""
Create the VML for comments
"""
sw = ShapeWriter(self.comments)
return sw.write(vml)
@property
def path(self):
"""
Return path within the archive
"""
return self._path.format(self._id)
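# Illustrative sketch (an addition, not part of openpyxl): the author
# de-duplication performed by CommentSheet.from_comments above relies on
# IndexedList.add returning a stable index per distinct value. A minimal
# stand-alone equivalent; the helper name is an assumption made for this example.
def _dedupe_authors(author_names):
    authors = []
    index_of = {}
    ids = []
    for name in author_names:
        if name not in index_of:
            index_of[name] = len(authors)
            authors.append(name)
        ids.append(index_of[name])
    return authors, ids
# _dedupe_authors(["alice", "bob", "alice"]) -> (["alice", "bob"], [0, 1, 0]),
# so two comments by the same author share one entry in the AuthorList.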
| 27.188841
| 90
| 0.597632
|
bb38b678679f047fb4c6daaa6def4f7ddc57666d
| 27,730
|
py
|
Python
|
sifter.py
|
jakiki6/sandsifter
|
d852d01560be908b85cb95734d31ace8909b85b1
|
[
"BSD-3-Clause"
] | 2
|
2021-04-21T17:18:09.000Z
|
2021-07-29T15:50:36.000Z
|
sifter.py
|
jakiki6/sandsifter
|
d852d01560be908b85cb95734d31ace8909b85b1
|
[
"BSD-3-Clause"
] | null | null | null |
sifter.py
|
jakiki6/sandsifter
|
d852d01560be908b85cb95734d31ace8909b85b1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
# instruction injector frontend
#
# github.com/xoreaxeaxeax/sandsifter // domas // @xoreaxeaxeax
#
# run as sudo for best results
import signal
import sys
import subprocess
import os
from struct import *
from capstone import *
from collections import namedtuple
from collections import deque
import threading
import time
import curses
from binascii import hexlify
import re
import random
import argparse
import code
import copy
from ctypes import *
INJECTOR = "./injector"
arch = ""
OUTPUT = "./data/"
LOG = OUTPUT + "log"
SYNC = OUTPUT + "sync"
TICK = OUTPUT + "tick"
LAST = OUTPUT + "last"
class ThreadState:
pause = False
run = True
class InjectorResults(Structure):
_fields_ = [('disas_length', c_int),
('disas_known', c_int),
('raw_insn', c_ubyte * 16),
('valid', c_int),
('length', c_int),
('signum', c_int),
('sicode', c_int),
('siaddr', c_int),
]
class Settings:
SYNTH_MODE_RANDOM = "r"
SYNTH_MODE_BRUTE = "b"
SYNTH_MODE_TUNNEL = "t"
synth_mode = SYNTH_MODE_RANDOM
root = False
seed = 0
args = ""
def __init__(self, args):
if "-r" in args:
self.synth_mode = self.SYNTH_MODE_RANDOM
elif "-b" in args:
self.synth_mode = self.SYNTH_MODE_BRUTE
elif "-t" in args:
self.synth_mode = self.SYNTH_MODE_TUNNEL
self.args = args
self.root = (os.geteuid() == 0)
self.seed = random.getrandbits(32)
def increment_synth_mode(self):
if self.synth_mode == self.SYNTH_MODE_BRUTE:
self.synth_mode = self.SYNTH_MODE_RANDOM
elif self.synth_mode == self.SYNTH_MODE_RANDOM:
self.synth_mode = self.SYNTH_MODE_TUNNEL
elif self.synth_mode == self.SYNTH_MODE_TUNNEL:
self.synth_mode = self.SYNTH_MODE_BRUTE
class Tests:
r = InjectorResults() # current result
IL=20 # instruction log len
UL=10 # artifact log len
il = deque(maxlen=IL) # instruction log
al = deque(maxlen=UL) # artifact log
ad = dict() # artifact dict
ic = 0 # instruction count
ac = 0 # artifact count
start_time = time.time()
def elapsed(self):
m, s = divmod(time.time() - self.start_time, 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d.%02d" % (h, m, int(s), int(100*(s-int(s))) )
class Tee(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
def flush(self):
self.file.flush()
self.stdout.flush()
# capstone disassembler
md = None
def disas_capstone(b):
global md, arch
if not md:
if arch == "64":
md = Cs(CS_ARCH_X86, CS_MODE_64)
else:
md = Cs(CS_ARCH_X86, CS_MODE_32)
try:
address, size, mnemonic, op_str = next(md.disasm_lite(b, 0))
except StopIteration:
mnemonic="(unk)"
op_str=""
size = 0
return (mnemonic, op_str, size)
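# Illustrative sketch (an addition, not part of the original tool): every
# disassembler callback in this file returns a (mnemonic, op_str, size) triple
# for a raw byte string. The helper below shows the calling convention with a
# single-byte NOP, which decodes the same way in 32- and 64-bit mode; the helper
# name is an assumption and nothing in the tool calls it (calling it before
# main() would also pin the cached capstone mode).
def _disas_capstone_demo():
    mnemonic, op_str, size = disas_capstone(b"\x90")
    return mnemonic, op_str, size   # expected: ("nop", "", 1)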
# ndisasm disassembler
# (ndisasm breaks unnecessary prefixes onto their own line, which makes parsing
# the output difficult. really only useful with the -P0 flag to disallow
# prefixes)
def disas_ndisasm(b):
    b = ''.join('\\x%02x' % c for c in b)
    if arch == "64":
        dis, errors = subprocess.Popen("echo -ne '%s' | ndisasm -b64 - | head -2" % b,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
                universal_newlines=True).communicate()
    else:
        dis, errors = subprocess.Popen("echo -ne '%s' | ndisasm -b32 - | head -2" % b,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
                universal_newlines=True).communicate()
dis = dis.split("\n")
extra = dis[1]
dis = dis[0].split(None, 4)
if extra.strip()[0] == '-':
dis[1] = dis[1] + extra.strip()[1:]
address = dis[0]
insn = dis[1]
mnemonic = dis[2]
if len(dis) > 3:
op_str = dis[3]
else:
op_str = ""
if mnemonic == "db":
mnemonic = "(unk)"
insn = ""
op_str = ""
size = len(insn)//2
return (mnemonic, op_str, size)
# objdump disassembler
# (objdump breaks unnecessary prefixes onto its own line, which makes parsing
# the output difficult. really only useful with the -P0 flag to disallow
# prefixes)
def disas_objdump(b):
with open("/dev/shm/shifter", "w") as f:
f.write(b)
if arch == "64":
dis, errors = subprocess.Popen("objdump -D --insn-width=256 -b binary \
-mi386 -Mx86-64 /dev/shm/shifter | head -8 | tail -1",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
else:
dis, errors = subprocess.Popen("objdump -D --insn-width=256 -b binary \
-mi386 /dev/shm/shifter | head -8 | tail -1",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
dis = dis[6:] # address
raw = dis[:256*3].replace(" ","")
dis = dis[256*3:].strip().split(None, 2)
mnemonic = dis[0]
if len(dis) > 1:
op_str = dis[1]
else:
op_str = ""
if mnemonic == "(bad)":
mnemonic = "(unk)"
insn = ""
op_str = ""
size = len(raw)//2
return (mnemonic, op_str, size)
def cstr2py(s):
    # return the raw instruction bytes unchanged (ctypes array -> bytes); using a
    # str round-trip plus .encode() would mangle bytes >= 0x80 under UTF-8
    return bytes(s)
# targeting python 2.6 support
def int_to_comma(x):
if type(x) not in [type(0), type(0)]:
raise TypeError("Parameter must be an integer.")
if x < 0:
return '-' + int_to_comma(-x)
result = ''
while x >= 1000:
x, r = divmod(x, 1000)
result = ",%03d%s" % (r, result)
return "%d%s" % (x, result)
def result_string(insn, result):
s = "%30s %2d %2d %2d %2d (%s)\n" % (
hexlify(insn).decode(), result.valid,
result.length, result.signum,
        result.sicode, hexlify(cstr2py(result.raw_insn)).decode())
return s
class Injector:
process = None
settings = None
command = None
def __init__(self, settings):
self.settings = settings
def start(self):
self.command = "%s %s -%c -R %s -s %d" % \
(
INJECTOR,
" ".join(self.settings.args),
self.settings.synth_mode,
"-0" if self.settings.root else "",
self.settings.seed
)
self.process = subprocess.Popen(
"exec %s" % self.command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=os.setsid
)
def stop(self):
if self.process:
try:
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
except OSError:
pass
class Poll:
SIGILL = 4
SIGSEGV = 11
SIGFPE = 8
SIGBUS = 7
SIGTRAP = 5
def __init__(self, ts, injector, tests, command_line, sync=False, low_mem=False, search_unk=True,
search_len=False, search_dis=False, search_ill=False, disassembler=disas_capstone):
self.ts = ts
self.injector = injector
self.T = tests
self.poll_thread = None
self.sync = sync
self.low_mem = low_mem
self.search_len = search_len
self.search_unk = search_unk
self.search_dis = search_dis
self.search_ill = search_ill
self.disas = disassembler
if self.sync:
with open(SYNC, "w") as f:
f.write("#\n")
f.write("# %s\n" % command_line)
f.write("# %s\n" % injector.command)
f.write("#\n")
f.write("# cpu:\n")
cpu = get_cpu_info()
for l in cpu:
f.write("# %s\n" % l)
f.write("# %s v l s c\n" % (" " * 28))
def start(self):
self.poll_thread = threading.Thread(target=self.poll)
self.poll_thread.start()
def stop(self):
self.poll_thread.join()
while self.ts.run:
time.sleep(.1)
def poll(self):
while self.ts.run:
while self.ts.pause:
time.sleep(.1)
bytes_polled = self.injector.process.stdout.readinto(self.T.r)
if bytes_polled == sizeof(self.T.r):
self.T.ic = self.T.ic + 1
error = False
if self.T.r.valid:
if self.search_unk and not self.T.r.disas_known and self.T.r.signum != self.SIGILL:
error = True
if self.search_len and self.T.r.disas_known and self.T.r.disas_length != self.T.r.length:
error = True
if self.search_dis and self.T.r.disas_known \
and self.T.r.disas_length != self.T.r.length and self.T.r.signum != self.SIGILL:
error = True
if self.search_ill and self.T.r.disas_known and self.T.r.signum == self.SIGILL:
error = True
if error:
                        insn = cstr2py(self.T.r.raw_insn)[:self.T.r.length]
r = copy.deepcopy(self.T.r)
self.T.al.appendleft(r)
if insn not in self.T.ad:
if not self.low_mem:
self.T.ad[insn] = r
self.T.ac = self.T.ac + 1
if self.sync:
with open(SYNC, "a") as f:
f.write(result_string(insn, self.T.r))
else:
if self.injector.process.poll() is not None:
self.ts.run = False
break
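# Illustrative sketch (an addition, not part of the original tool): the artifact
# test inside Poll.poll above, pulled out as a standalone predicate so the four
# search modes are easier to read side by side. SIGILL is signal number 4 as in
# the Poll class; the function and argument names are assumptions.
def _is_artifact(valid, disas_known, disas_length, length, signum,
                 search_unk, search_len, search_dis, search_ill, SIGILL=4):
    if not valid:
        return False
    if search_unk and not disas_known and signum != SIGILL:
        return True    # executes, but the disassembler does not know it
    if search_len and disas_known and disas_length != length:
        return True    # executed length disagrees with the disassembler
    if search_dis and disas_known and disas_length != length and signum != SIGILL:
        return True    # known instruction, wrong disassembled length
    if search_ill and disas_known and signum == SIGILL:
        return True    # disassembler knows it, the CPU rejects it
    return False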
class Gui:
TIME_SLICE = .01
GRAY_BASE = 50
TICK_MASK = 0xff
RATE_Q = 100
RATE_FACTOR = 1000
INDENT = 10
GRAYS = 50
BLACK = 1
WHITE = 2
BLUE = 3
RED = 4
GREEN = 5
COLOR_BLACK = 16
COLOR_WHITE = 17
COLOR_BLUE = 18
COLOR_RED = 19
COLOR_GREEN = 20
def __init__(self, ts, injector, tests, do_tick, disassembler=disas_capstone):
self.ts = ts;
self.injector = injector
self.T = tests
self.gui_thread = None
self.do_tick = do_tick
self.ticks = 0
self.last_ins_count = 0
self.delta_log = deque(maxlen=self.RATE_Q)
self.time_log = deque(maxlen=self.RATE_Q)
self.disas = disassembler
self.stdscr = curses.initscr()
curses.start_color()
# doesn't work
# self.orig_colors = [curses.color_content(x) for x in xrange(256)]
curses.use_default_colors()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.stdscr.nodelay(1)
self.sx = 0
self.sy = 0
self.init_colors()
self.stdscr.bkgd(curses.color_pair(self.WHITE))
self.last_time = time.time()
def init_colors(self):
if curses.has_colors() and curses.can_change_color():
curses.init_color(self.COLOR_BLACK, 0, 0, 0)
curses.init_color(self.COLOR_WHITE, 1000, 1000, 1000)
curses.init_color(self.COLOR_BLUE, 0, 0, 1000)
curses.init_color(self.COLOR_RED, 1000, 0, 0)
curses.init_color(self.COLOR_GREEN, 0, 1000, 0)
# this will remove flicker, but gives boring colors
'''
self.COLOR_BLACK = curses.COLOR_BLACK
self.COLOR_WHITE = curses.COLOR_WHITE
self.COLOR_BLUE = curses.COLOR_BLUE
self.COLOR_RED = curses.COLOR_RED
self.COLOR_GREEN = curses.COLOR_GREEN
'''
for i in range(0, self.GRAYS):
curses.init_color(
self.GRAY_BASE + i,
i * 1000 // (self.GRAYS - 1),
i * 1000 // (self.GRAYS - 1),
i * 1000 // (self.GRAYS - 1)
)
curses.init_pair(
self.GRAY_BASE + i,
self.GRAY_BASE + i,
self.COLOR_BLACK
)
else:
self.COLOR_BLACK = curses.COLOR_BLACK
self.COLOR_WHITE = curses.COLOR_WHITE
self.COLOR_BLUE = curses.COLOR_BLUE
self.COLOR_RED = curses.COLOR_RED
self.COLOR_GREEN = curses.COLOR_GREEN
for i in range(0, self.GRAYS):
curses.init_pair(
self.GRAY_BASE + i,
self.COLOR_WHITE,
self.COLOR_BLACK
)
curses.init_pair(self.BLACK, self.COLOR_BLACK, self.COLOR_BLACK)
curses.init_pair(self.WHITE, self.COLOR_WHITE, self.COLOR_BLACK)
curses.init_pair(self.BLUE, self.COLOR_BLUE, self.COLOR_BLACK)
curses.init_pair(self.RED, self.COLOR_RED, self.COLOR_BLACK)
curses.init_pair(self.GREEN, self.COLOR_GREEN, self.COLOR_BLACK)
def gray(self, scale):
if curses.can_change_color():
return curses.color_pair(self.GRAY_BASE + int(round(scale * (self.GRAYS - 1))))
else:
return curses.color_pair(self.WHITE)
def box(self, window, x, y, w, h, color):
for i in range(1, w - 1):
window.addch(y, x + i, curses.ACS_HLINE, color)
window.addch(y + h - 1, x + i, curses.ACS_HLINE, color)
for i in range(1, h - 1):
window.addch(y + i, x, curses.ACS_VLINE, color)
window.addch(y + i, x + w - 1, curses.ACS_VLINE, color)
window.addch(y, x, curses.ACS_ULCORNER, color)
window.addch(y, x + w - 1, curses.ACS_URCORNER, color)
window.addch(y + h - 1, x, curses.ACS_LLCORNER, color)
window.addch(y + h - 1, x + w - 1, curses.ACS_LRCORNER, color)
def bracket(self, window, x, y, h, color):
for i in range(1, h - 1):
window.addch(y + i, x, curses.ACS_VLINE, color)
window.addch(y, x, curses.ACS_ULCORNER, color)
window.addch(y + h - 1, x, curses.ACS_LLCORNER, color)
def vaddstr(self, window, x, y, s, color):
for i in range(0, len(s)):
window.addch(y + i, x, s[i], color)
def draw(self):
try:
self.stdscr.erase()
# constants
left = self.sx + self.INDENT
top = self.sy
top_bracket_height = self.T.IL
top_bracket_middle = self.T.IL // 2
mne_width = 10
op_width = 45
raw_width = (16*2)
# render log bracket
self.bracket(self.stdscr, left - 1, top, top_bracket_height + 2, self.gray(1))
# render logo
self.vaddstr(self.stdscr, left - 3, top + top_bracket_middle - 5, "sand", self.gray(.2))
self.vaddstr(self.stdscr, left - 3, top + top_bracket_middle + 5, "sifter", self.gray(.2))
# refresh instruction log
            synth_insn = cstr2py(self.T.r.raw_insn)
mnemonic, op_str, size = self.disas(synth_insn)
self.T.il.append(
(
mnemonic,
op_str,
self.T.r.length,
"%s" % hexlify(synth_insn).decode()
)
)
# render instruction log
try:
for (i, r) in enumerate(self.T.il):
line = i + self.T.IL - len(self.T.il)
(mnemonic, op_str, length, raw) = r
if i == len(self.T.il) - 1:
# latest instruction
# mnemonic
self.stdscr.addstr(
top + 1 + line,
left,
"%*s " % (mne_width, mnemonic),
self.gray(1)
)
# operands
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1),
"%-*s " % (op_width, op_str),
curses.color_pair(self.BLUE)
)
# bytes
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1),
"%s" % raw[0:length * 2],
self.gray(.9)
)
self.stdscr.addstr(
top + 1 +line,
left + (mne_width + 1) + (op_width + 1) + length * 2,
"%s" % raw[length * 2:raw_width],
self.gray(.3)
)
else:
# previous instructions
# mnemonic, operands
self.stdscr.addstr(
top + 1 + line,
left,
"%*s %-*s" % (mne_width, mnemonic, op_width, op_str),
self.gray(.5)
)
# bytes
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1),
"%s" % raw[0:length * 2],
self.gray(.3)
)
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1) + length * 2,
"%s" % raw[length * 2:raw_width],
self.gray(.1)
)
except RuntimeError:
# probably the deque was modified by the poller
pass
# rate calculation
self.delta_log.append(self.T.ic - self.last_ins_count)
self.last_ins_count = self.T.ic
ctime = time.time()
self.time_log.append(ctime - self.last_time)
self.last_time = ctime
rate = int(sum(self.delta_log)//sum(self.time_log))
# render timestamp
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.vaddstr(
self.stdscr,
left + (mne_width + 1) + (op_width + 1) + (raw_width + 1),
top + 1,
self.T.elapsed(),
self.gray(.5)
)
# render injection settings
self.stdscr.addstr(top + 1, left - 8, "%d" % self.injector.settings.root, self.gray(.1))
self.stdscr.addstr(top + 1, left - 7, "%s" % arch, self.gray(.1))
self.stdscr.addstr(top + 1, left - 3, "%c" % self.injector.settings.synth_mode, self.gray(.5))
# render injection results
self.stdscr.addstr(top + top_bracket_middle, left - 6, "v:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle, left - 4, "%2x" % self.T.r.valid)
self.stdscr.addstr(top + top_bracket_middle + 1, left - 6, "l:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 1, left - 4, "%2x" % self.T.r.length)
self.stdscr.addstr(top + top_bracket_middle + 2, left - 6, "s:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 2, left - 4, "%2x" % self.T.r.signum)
self.stdscr.addstr(top + top_bracket_middle + 3, left - 6, "c:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 3, left - 4, "%2x" % self.T.r.sicode)
# render instruction count
self.stdscr.addstr(top + top_bracket_height + 2, left, "#", self.gray(.5))
self.stdscr.addstr(top + top_bracket_height + 2, left + 2,
"%s" % (int_to_comma(self.T.ic)), self.gray(1))
# render rate
self.stdscr.addstr(top + top_bracket_height + 3, left,
" %d/s%s" % (rate, " " * min(rate // self.RATE_FACTOR, 100)), curses.A_REVERSE)
# render artifact count
self.stdscr.addstr(top + top_bracket_height + 4, left, "#", self.gray(.5))
self.stdscr.addstr(top + top_bracket_height + 4, left + 2,
"%s" % (int_to_comma(self.T.ac)), curses.color_pair(self.RED))
# render artifact log
if self.maxy >= top + top_bracket_height + 5 + self.T.UL + 2:
# render artifact bracket
self.bracket(self.stdscr, left - 1, top + top_bracket_height + 5, self.T.UL + 2, self.gray(1))
# render artifacts
try:
for (i, r) in enumerate(self.T.al):
y = top_bracket_height + 5 + i
                        insn_hex = hexlify(cstr2py(r.raw_insn)).decode()
# unexplainable hack to remove some of the unexplainable
# flicker on my console. a bug in ncurses? doesn't
# happen if using curses.COLOR_RED instead of a custom
# red. doesn't happen if using a new random string each
# time; doesn't happen if using a constant string each
# time. only happens with the specific implementation below.
#TODO: on systems with limited color settings, this
# makes the background look like random characters
random_string = ("%02x" % random.randint(0,100)) * (raw_width-2)
self.stdscr.addstr(top + 1 + y, left, random_string, curses.color_pair(self.BLACK))
self.stdscr.addstr(top + 1 + y, left + 1,
"%s" % insn_hex[0:r.length * 2], curses.color_pair(self.RED))
self.stdscr.addstr(top + 1 + y, left + 1 + r.length * 2,
"%s" % insn_hex[r.length * 2:raw_width], self.gray(.25))
except RuntimeError:
# probably the deque was modified by the poller
pass
self.stdscr.refresh()
except curses.error:
pass
def start(self):
self.gui_thread = threading.Thread(target=self.render)
self.gui_thread.start()
def stop(self):
self.gui_thread.join()
def checkkey(self):
c = self.stdscr.getch()
if c == ord('p'):
self.ts.pause = not self.ts.pause
elif c == ord('q'):
self.ts.run = False
elif c == ord('m'):
self.ts.pause = True
time.sleep(.1)
self.injector.stop()
self.injector.settings.increment_synth_mode()
self.injector.start()
self.ts.pause = False
def render(self):
while self.ts.run:
while self.ts.pause:
self.checkkey()
time.sleep(.1)
(self.maxy,self.maxx) = self.stdscr.getmaxyx()
self.sx = 1
self.sy = max((self.maxy + 1 - (self.T.IL + self.T.UL + 5 + 2))//2, 0)
self.checkkey()
            synth_insn = cstr2py(self.T.r.raw_insn)
if synth_insn and not self.ts.pause:
self.draw()
if self.do_tick:
self.ticks = self.ticks + 1
if self.ticks & self.TICK_MASK == 0:
with open(TICK, 'w') as f:
f.write("{}".format(hexlify(synth_insn).decode()))
time.sleep(self.TIME_SLICE)
def get_cpu_info():
with open("/proc/cpuinfo", "r") as f:
cpu = [l.strip() for l in f.readlines()[:7]]
return cpu
def dump_artifacts(r, injector, command_line):
global arch
tee = Tee(LOG, "w")
tee.write("#\n")
tee.write("# %s\n" % command_line)
tee.write("# %s\n" % injector.command)
tee.write("#\n")
tee.write("# insn tested: %d\n" % r.ic)
tee.write("# artf found: %d\n" % r.ac)
tee.write("# runtime: %s\n" % r.elapsed())
tee.write("# seed: %d\n" % injector.settings.seed)
tee.write("# arch: %s\n" % arch)
tee.write("# date: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S"))
tee.write("#\n")
tee.write("# cpu:\n")
cpu = get_cpu_info()
for l in cpu:
tee.write("# %s\n" % l)
tee.write("# %s v l s c\n" % (" " * 28))
for k in sorted(list(r.ad)):
v = r.ad[k]
tee.write(result_string(k, v))
def cleanup(gui, poll, injector, ts, tests, command_line, args):
ts.run = False
if gui:
gui.stop()
if poll:
poll.stop()
if injector:
injector.stop()
'''
# doesn't work
if gui:
for (i, c) in enumerate(gui.orig_colors):
curses.init_color(i, c[0], c[1], c[2])
'''
curses.nocbreak();
curses.echo()
curses.endwin()
dump_artifacts(tests, injector, command_line)
if args.save:
with open(LAST, "w") as f:
            f.write(hexlify(cstr2py(tests.r.raw_insn)).decode())
sys.exit(0)
def main():
global arch
def exit_handler(signal, frame):
cleanup(gui, poll, injector, ts, tests, command_line, args)
injector = None
poll = None
gui = None
command_line = " ".join(sys.argv)
parser = argparse.ArgumentParser()
parser.add_argument("--len", action="store_true", default=False,
help="search for length differences in all instructions (instructions\
that executed differently than the disassembler expected, or did not\
exist when the disassembler expected them to)"
)
parser.add_argument("--dis", action="store_true", default=False,
help="search for length differences in valid instructions (instructions\
that executed differently than the disassembler expected)"
)
parser.add_argument("--unk", action="store_true", default=False,
help="search for unknown instructions (instructions that the\
disassembler doesn't know about but successfully execute)"
)
parser.add_argument("--ill", action="store_true", default=False,
help="the inverse of --unk, search for invalid disassemblies\
(instructions that do not successfully execute but that the\
disassembler acknowledges)"
)
parser.add_argument("--tick", action="store_true", default=False,
help="periodically write the current instruction to disk"
)
parser.add_argument("--save", action="store_true", default=False,
help="save search progress on exit"
)
parser.add_argument("--resume", action="store_true", default=False,
help="resume search from last saved state"
)
parser.add_argument("--sync", action="store_true", default=False,
help="write search results to disk as they are found"
)
parser.add_argument("--low-mem", action="store_true", default=False,
help="do not store results in memory"
)
parser.add_argument("injector_args", nargs=argparse.REMAINDER)
args = parser.parse_args()
injector_args = args.injector_args
if "--" in injector_args: injector_args.remove("--")
if not args.len and not args.unk and not args.dis and not args.ill:
print("warning: no search type (--len, --unk, --dis, --ill) specified, results will not be recorded.")
input()
if args.resume:
if "-i" in injector_args:
print("--resume is incompatible with -i")
sys.exit(1)
if os.path.exists(LAST):
with open(LAST, "r") as f:
insn = f.read()
injector_args.extend(['-i',insn])
else:
print("no resume file found")
sys.exit(1)
if not os.path.exists(OUTPUT):
os.makedirs(OUTPUT)
injector_bitness, errors = \
subprocess.Popen(
['file', INJECTOR],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
arch = "64"
ts = ThreadState()
signal.signal(signal.SIGINT, exit_handler)
settings = Settings(args.injector_args)
tests = Tests()
injector = Injector(settings)
injector.start()
poll = Poll(ts, injector, tests, command_line, args.sync,
args.low_mem, args.unk, args.len, args.dis, args.ill)
poll.start()
gui = Gui(ts, injector, tests, args.tick)
gui.start()
while ts.run:
time.sleep(.1)
cleanup(gui, poll, injector, ts, tests, command_line, args)
if __name__ == '__main__':
main()
| 32.777778
| 107
| 0.549802
|
3113c2f7e322f8fc161c40ecbe34907c3c3f1908
| 3,294
|
py
|
Python
|
alipay/aop/api/domain/CreditBankTraining.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/CreditBankTraining.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/CreditBankTraining.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CreditBankTraining(object):
def __init__(self):
self._experience_time = None
self._have_project_certificate = None
self._inst_name = None
self._project_name = None
self._training_outer_id = None
@property
def experience_time(self):
return self._experience_time
@experience_time.setter
def experience_time(self, value):
self._experience_time = value
@property
def have_project_certificate(self):
return self._have_project_certificate
@have_project_certificate.setter
def have_project_certificate(self, value):
self._have_project_certificate = value
@property
def inst_name(self):
return self._inst_name
@inst_name.setter
def inst_name(self, value):
self._inst_name = value
@property
def project_name(self):
return self._project_name
@project_name.setter
def project_name(self, value):
self._project_name = value
@property
def training_outer_id(self):
return self._training_outer_id
@training_outer_id.setter
def training_outer_id(self, value):
self._training_outer_id = value
def to_alipay_dict(self):
params = dict()
if self.experience_time:
if hasattr(self.experience_time, 'to_alipay_dict'):
params['experience_time'] = self.experience_time.to_alipay_dict()
else:
params['experience_time'] = self.experience_time
if self.have_project_certificate:
if hasattr(self.have_project_certificate, 'to_alipay_dict'):
params['have_project_certificate'] = self.have_project_certificate.to_alipay_dict()
else:
params['have_project_certificate'] = self.have_project_certificate
if self.inst_name:
if hasattr(self.inst_name, 'to_alipay_dict'):
params['inst_name'] = self.inst_name.to_alipay_dict()
else:
params['inst_name'] = self.inst_name
if self.project_name:
if hasattr(self.project_name, 'to_alipay_dict'):
params['project_name'] = self.project_name.to_alipay_dict()
else:
params['project_name'] = self.project_name
if self.training_outer_id:
if hasattr(self.training_outer_id, 'to_alipay_dict'):
params['training_outer_id'] = self.training_outer_id.to_alipay_dict()
else:
params['training_outer_id'] = self.training_outer_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CreditBankTraining()
if 'experience_time' in d:
o.experience_time = d['experience_time']
if 'have_project_certificate' in d:
o.have_project_certificate = d['have_project_certificate']
if 'inst_name' in d:
o.inst_name = d['inst_name']
if 'project_name' in d:
o.project_name = d['project_name']
if 'training_outer_id' in d:
o.training_outer_id = d['training_outer_id']
return o
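# Illustrative sketch (an addition, not part of the SDK): round-tripping a plain
# dict through from_alipay_dict and to_alipay_dict. The keys follow the property
# names defined above; the values and the helper name are assumptions made for
# this example.
def _credit_bank_training_roundtrip():
    src = {
        'experience_time': '30',
        'have_project_certificate': 'Y',
        'inst_name': 'Example Institute',
        'project_name': 'Example Project',
        'training_outer_id': 'T-001',
    }
    obj = CreditBankTraining.from_alipay_dict(src)
    return obj.to_alipay_dict() == src   # True for this example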
| 32.613861
| 99
| 0.637523
|
19ac3b725dd3b337e776bfb232ad9525b445b0dc
| 17,578
|
py
|
Python
|
python-3.4.4.amd64/Lib/distutils/command/build_py.py
|
CSnap/photogate
|
208272ef39f4e86f40d431da2ca523e21701f789
|
[
"CC0-1.0"
] | null | null | null |
python-3.4.4.amd64/Lib/distutils/command/build_py.py
|
CSnap/photogate
|
208272ef39f4e86f40d431da2ca523e21701f789
|
[
"CC0-1.0"
] | null | null | null |
python-3.4.4.amd64/Lib/distutils/command/build_py.py
|
CSnap/photogate
|
208272ef39f4e86f40d431da2ca523e21701f789
|
[
"CC0-1.0"
] | null | null | null |
"""distutils.command.build_py
Implements the Distutils 'build_py' command."""
import os
import importlib.util
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import *
from distutils.util import convert_path, Mixin2to3
from distutils import log
class build_py (Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = 0
self.optimize = 0
self.force = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files
and os.path.isfile(fn)])
return files
def build_package_data(self):
"""Copy data files into build directory"""
lastdir = None
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
"package directory '%s' does not exist" % package_dir)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
"supposed package directory '%s' exists, "
"but is not a directory" % package_dir)
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
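    # Illustrative only (assumes a POSIX path separator):
    #   self.get_module_outfile('build/lib', ['pkg', 'sub'], 'mod')
    #   # -> 'build/lib/pkg/sub/mod.py'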
def get_outputs(self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(importlib.util.cache_from_source(
filename, debug_override=True))
if self.optimize > 0:
outputs.append(importlib.util.cache_from_source(
filename, debug_override=False))
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple")
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules(self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
class build_py_2to3(build_py, Mixin2to3):
def run(self):
self.updated_files = []
# Base class code
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
# 2to3
self.run_2to3(self.updated_files)
# Remaining base class code
self.byte_compile(self.get_outputs(include_bytecode=0))
def build_module(self, module, module_file, package):
res = build_py.build_module(self, module, module_file, package)
if res[1]:
# file was copied
self.updated_files.append(res[0])
return res
| 42.153477
| 80
| 0.57572
|
18f2f9cadf0e59e1ca73a78aa0c7a419796785fc
| 405
|
py
|
Python
|
Binary search algorithm/main.py
|
Noha101/python
|
4cafd75f3e588e8dc3cccad786781316dab836f7
|
[
"MIT"
] | null | null | null |
Binary search algorithm/main.py
|
Noha101/python
|
4cafd75f3e588e8dc3cccad786781316dab836f7
|
[
"MIT"
] | 1
|
2021-09-07T09:59:56.000Z
|
2021-09-07T10:00:40.000Z
|
Binary search algorithm/main.py
|
Noha101/python
|
4cafd75f3e588e8dc3cccad786781316dab836f7
|
[
"MIT"
] | 1
|
2021-09-07T09:42:31.000Z
|
2021-09-07T09:42:31.000Z
|
def search(arr, l, r, x):
if r >= l:
mid = l + (r - l) // 2
if arr[mid] == x:
return mid
elif arr[mid] > x:
return search(arr, l, mid-1, x)
else:
return search(arr, mid + 1, r, x)
else:
return -1
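# Worked trace (illustrative): search([2, 3, 4, 10, 40], 0, 4, 4)
#   mid = 0 + (4 - 0) // 2 = 2 and arr[2] == 4, so the call returns index 2.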
arr = [ 2, 3, 4, 10, 40 ]
x = 10
result = search(arr, 0, len(arr)-1, x)
if result != -1:
print(f"Element is present at index {result}")
else:
print("Element is not present in array")
| 18.409091
| 47
| 0.562963
|
00bdc1e96b5a7da9a2192e93c759e5b858e5578a
| 6,351
|
py
|
Python
|
geo_png_tiler/resources.py
|
sasakiassociates/qgis-geo-png-db
|
bb71daa68e3721074482944d12f6323ce5136fed
|
[
"MIT"
] | 1
|
2021-10-01T11:44:59.000Z
|
2021-10-01T11:44:59.000Z
|
geo_png_tiler/resources.py
|
sasakiassociates/qgis-geo-png-db
|
bb71daa68e3721074482944d12f6323ce5136fed
|
[
"MIT"
] | null | null | null |
geo_png_tiler/resources.py
|
sasakiassociates/qgis-geo-png-db
|
bb71daa68e3721074482944d12f6323ce5136fed
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x5c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x01\x54\x00\x00\x01\x54\
\x01\x04\x18\xa5\x96\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\xd9\x49\x44\
\x41\x54\x48\x89\x95\x95\x5d\x4c\x9b\x65\x14\xc7\x7f\xe7\x6d\xcb\
\x4a\x1b\xf0\x73\x0c\xdc\x34\x71\x81\xc5\x80\x8b\x17\xc5\x8f\x44\
\x65\x17\x46\x48\x48\x81\xee\x23\x5c\x39\x11\x5d\xbc\x70\x1b\xa0\
\x17\x2e\xd9\x2e\x6c\x34\x7e\xdc\xe8\x9c\x4c\x97\x88\x11\xa7\x66\
\x86\x9a\x18\xa1\x2f\x0b\x78\x61\x14\xbf\x16\x15\x17\x8d\x10\x43\
\x40\x8c\x23\xe0\x54\x98\x5a\xb6\xd2\xd2\xbe\xc7\x0b\xde\x96\x17\
\xd6\x0e\x76\x6e\x9e\xe7\x9c\xe7\x7f\xfe\xff\x73\x9e\x9c\x27\x8f\
\xe0\xb0\xd1\xee\x8d\x15\x96\x48\x3b\x42\xad\x95\x66\xab\xe1\xc2\
\x05\x30\xf9\x95\x9f\x1f\x3f\x2a\x76\x42\xd9\x69\x78\x86\x4a\x0d\
\xa9\xb1\xdd\x05\xe0\x77\x41\x3e\x49\xa5\x53\xc7\x4a\xfe\xfa\x61\
\x3c\x83\x33\x32\x9b\x9f\xdf\xd9\xf4\xa4\x65\xc8\x08\xc2\x7e\xa0\
\x22\x43\xbe\x4e\xf3\x02\xdb\x14\x3d\xe0\x72\xb9\x46\x67\xcb\xaa\
\x3b\x56\x08\x8c\x9c\x2c\x79\x0a\xd1\x57\x00\xcf\x55\x90\xe6\x33\
\x0f\x2a\x47\x67\x37\xdd\xd9\x0e\x20\xa3\xdd\x1b\x2b\x2c\x43\x46\
\x56\x91\xf7\xcd\x4d\x6e\x98\xbe\x38\x6b\xf8\x01\xfe\x9b\xf6\x4c\
\x8f\x7f\xee\x1b\x77\xb2\x3c\x68\xb8\xe3\xe5\x2e\x97\x1f\x40\x95\
\x22\x85\x1d\x02\x0d\x0e\x48\x32\x9d\x4e\x57\xb9\x2d\x91\x76\x27\
\xb9\xc2\xde\xed\x2d\x7f\xbe\xbf\x56\x99\x47\x2e\x0f\xbd\xfc\x77\
\x59\xf5\x5e\x51\x79\xd7\xf6\x0b\xdc\x2e\x77\xbb\x81\x50\xeb\xac\
\x3c\x17\xf9\xc0\xc0\xc0\xf5\xfd\xfd\xfd\xa5\xaa\x2a\x57\x12\xbd\
\x71\xe6\xfb\xf7\x14\xcc\xe5\x62\xb5\xd6\x00\x6e\xce\x04\x44\x18\
\x72\x26\xf4\xf5\xf5\x35\x9b\xa6\x39\xb2\xb8\xb8\x38\x6b\x59\xd6\
\x8c\x69\x9a\x53\xa6\x69\x1e\x8a\x44\x22\x79\x07\x40\xe0\x33\x87\
\x7b\x8b\xc1\xd2\x04\x2c\x29\xaa\xfc\x9b\xd9\x9b\xa6\x79\x58\x44\
\x7a\x54\xb5\xd2\x91\x70\x93\xaa\xbe\x54\x58\x58\xd8\x13\x0e\x87\
\x0d\x72\x99\x83\x03\xf0\xe6\x04\x45\xa3\xd1\x3b\x54\xf5\xb9\xa5\
\xae\xe4\x37\xe0\x80\xaa\xb6\x02\xdf\xd8\x90\xdd\x81\x40\xa0\x25\
\x5f\x17\x4e\x73\x9f\xf5\x3d\xf0\x65\xc6\xf1\xa7\x63\x71\xfb\x0a\
\x5b\x59\x1a\xe1\xf9\x54\x2a\x55\x13\x0a\x85\xce\x01\x44\x22\x91\
\x53\x3e\x9f\xef\xac\xdd\xd5\xa3\x40\xf7\x6a\xc2\xb1\xfb\x93\xf1\
\xe2\x3f\x5c\x59\x4e\xf7\x2f\x05\x77\xdf\x97\xed\x4e\xf5\xa4\x2d\
\xb0\xcd\x0e\x9d\x09\x85\x42\xe7\xa2\xd1\x68\x58\x55\xbd\x86\x61\
\x0c\xaa\x6a\x2f\x50\x09\xdc\x06\x60\x9a\xe6\x43\x96\x65\xdd\x2e\
\x22\x13\x0d\x0d\x0d\x5d\xa7\xdb\x12\x85\xaa\x92\xe5\x74\xe7\xe9\
\xac\xc0\x5e\x63\xf6\xda\x21\x22\xd7\x58\x96\x15\x13\x91\x4c\x6c\
\x03\x80\x65\x59\x3b\x45\x64\x17\xf0\x29\xd0\x75\xd9\x15\xe5\x62\
\x57\xd5\x21\x11\x99\x53\xd5\x6f\xed\x50\x0a\x40\x44\xbc\xc0\x30\
\xf0\x21\x10\xb7\x63\x85\x4e\xcc\xba\x04\x1a\x1b\x1b\x9f\x5d\x25\
\x38\x29\x22\x37\x00\xb5\xc3\xc3\xc3\xcf\x84\xc3\xe1\x8f\x61\xe9\
\x7d\x2c\x2e\x2e\xde\x63\xc3\x26\x72\x0a\xc4\xa6\xcb\x97\x1d\x6f\
\xbc\x3c\x17\x08\x38\x05\x54\x03\x77\x05\x02\x81\x9e\x68\x34\xfa\
\xba\x88\xf8\x93\xc9\x64\x58\x44\xae\xb3\x3b\xf9\x00\xc0\xf7\xcf\
\xe6\x72\x57\xd2\xb7\xcc\x19\xbf\x50\x9a\x75\xbc\xd7\x9e\x2f\xcb\
\x59\x85\xdb\xfd\x86\x65\x59\xbb\x55\xf5\x5e\x60\x0f\xb0\x47\x55\
\x11\xc9\x3e\xec\x13\xc1\x60\xf0\x0b\x00\x77\xc2\x5f\xe6\x9d\x2f\
\xc9\xe6\xe6\x7e\x2c\xab\xac\xbe\xbe\x3e\xe1\xf1\x78\xea\x80\x13\
\x40\xc2\x71\x74\x01\x78\x3a\x18\x0c\xee\xcf\x97\xeb\x16\x43\xd3\
\x6a\x89\x0b\x20\x9d\xf4\x5e\xca\x07\xac\xab\xab\xbb\x08\x3c\xd1\
\xdb\xdb\x7b\x48\x44\xaa\x54\x35\x91\x48\x24\x46\x9a\x9b\x9b\x93\
\x4e\x9c\x2b\x55\xb8\xcc\x21\x9a\x76\xab\xca\xaf\x40\x05\x40\x32\
\x5e\xbc\x79\xad\x6e\x9a\x9a\x9a\x62\xc0\x99\x7c\xe7\x9e\x44\xd1\
\x96\xac\xa3\x32\x61\xa0\x32\xb8\x2c\x28\x0d\xfb\x9e\xff\xee\xe1\
\xb5\x44\xf2\xd9\xf1\xb7\x1f\x6f\x41\x09\x3a\x42\x83\xf2\xd8\x8b\
\xc3\xe5\x86\xea\x28\x2b\x3f\x1c\x93\xf3\xb1\x29\x62\x0b\x7e\x00\
\x63\x76\x7e\x5a\x7f\x9a\x5a\xf1\xe1\xdc\xba\x6b\x26\x5e\xb4\x7d\
\x6e\xe9\xc3\xb1\xa4\x18\x83\x1d\xab\xc8\x93\x2a\x52\x29\x00\xfb\
\x5e\x18\xee\x10\xf4\x68\xbe\xca\x64\x74\x06\xf9\x7a\xe5\x98\x6f\
\x6d\x9d\x1c\xf2\x6d\xb9\x54\x93\x27\x05\xa0\xed\x60\x6b\x57\xa7\
\x01\xf0\xd6\xe1\xc0\xab\x28\x1d\x40\xf2\x0a\x09\xeb\xb5\xa4\x8a\
\xb4\x1f\x6c\xed\xea\x04\xc7\x98\x76\x1d\xa9\x3e\x66\x89\x54\x81\
\x1c\x07\xc6\x50\x4d\x5f\x05\xe9\x02\x30\x06\x74\xaa\x48\x65\xdb\
\x23\x6f\xbe\x96\x39\xf8\x1f\x98\x14\x7b\xbf\x36\xba\x39\xf5\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0d\
\x06\xe0\x4d\xe2\
\x00\x67\
\x00\x65\x00\x6f\x00\x5f\x00\x70\x00\x6e\x00\x67\x00\x5f\x00\x74\x00\x69\x00\x6c\x00\x65\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x70\x5f\x85\x8e\x6b\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 47.395522
| 103
| 0.7265
|
aceaef86edba34239549a2829cb466ef4917088e
| 2,729
|
py
|
Python
|
sequencing_np/nn/rnn_cells/rnn.py
|
SwordYork/sequencing
|
bcbc2006bf17315411ac3d629f7014f790b70418
|
[
"MIT"
] | 45
|
2017-08-06T15:02:12.000Z
|
2021-01-24T19:12:13.000Z
|
sequencing_np/nn/rnn_cells/rnn.py
|
SwordYork/sequencing
|
bcbc2006bf17315411ac3d629f7014f790b70418
|
[
"MIT"
] | null | null | null |
sequencing_np/nn/rnn_cells/rnn.py
|
SwordYork/sequencing
|
bcbc2006bf17315411ac3d629f7014f790b70418
|
[
"MIT"
] | 14
|
2017-08-07T04:56:55.000Z
|
2019-01-07T09:43:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
from abc import ABCMeta, abstractmethod
from ..base import Layer
from ... import np, TIME_MAJOR
class RNN(Layer, metaclass=ABCMeta):
def __init__(self, init_state, param_keys, activation=None,
base_name=None, name=None, *args, **kwargs):
"""
numpy rnn cell.
        It is only used for inference, not training, so we don't need initialization
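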
in this implementation.
The weights and other things are passed by params.
:param init_state: initial states of RNN, [B, H] or tuple([B, H], ...)
:param param_keys: name of params, such as kernel and bias
:param activation: activation function
:param base_name: name of parent Layer
:param name: name of this Layer
"""
super(RNN, self).__init__(param_keys, base_name, name, **kwargs)
# get state size
if type(init_state) != type(np.empty([])):
self.init_state = tuple(init_state)
self.hidden_units = tuple(init_state)[0].shape[1]
else:
self.init_state = init_state
self.hidden_units = init_state.shape[1]
self.time_major = TIME_MAJOR
self.activation = activation or np.tanh
def encode(self, inputs, sequence_length=None, reverse=False):
"""
Encode multi-step inputs.
:param inputs: if time_major [T, B, ...] else [B, T, ...]
:param sequence_length: length of the sequence [B]
:param reverse: used in bidirectional RNN
:return: lstm outputs
"""
if not self.time_major:
inputs = np.transpose(inputs, (1, 0, 2))
steps = inputs.shape[0]
outputs = np.zeros(inputs.shape[:-1] + (self.hidden_units,),
inputs.dtype)
state = self.init_state
iter_range = reversed(range(steps)) if reverse else range(steps)
for idx in iter_range:
# rnn step
curr_input = inputs[idx, :, :]
mask = idx < sequence_length if sequence_length is not None else None
outputs[idx, :, :], state = self.step(state, curr_input, mask)
if not self.time_major:
outputs = np.transpose(outputs, (1, 0, 2))
return outputs, state
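    # Shape sketch (assuming TIME_MAJOR is True, batch size 2, input size 16 and
    # hidden_units == 32): encode() maps inputs of shape [T, 2, 16] to outputs of
    # shape [T, 2, 32]; the returned state keeps the layout of init_state.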
@abstractmethod
def step(self, prev_states, input_, mask=None):
"""
run rnn for one step
:param prev_states: [B, ...]
:param input_: [B, ...]
:param mask: mask the terminated sequence in the batch
:return: output, state
"""
raise NotImplementedError
| 34.1125
| 83
| 0.596189
|
0e29ca1eb19a7bdcc8c13c84d96ffe07b2543edc
| 2,199
|
py
|
Python
|
xlsxwriter/test/worksheet/test_worksheet05.py
|
DeltaEpsilon7787/XlsxWriter
|
550b9c5bd678c861dcc9f6f4072b33a69566e065
|
[
"BSD-2-Clause-FreeBSD"
] | 2,766
|
2015-01-02T17:36:42.000Z
|
2022-03-31T09:23:30.000Z
|
xlsxwriter/test/worksheet/test_worksheet05.py
|
DeltaEpsilon7787/XlsxWriter
|
550b9c5bd678c861dcc9f6f4072b33a69566e065
|
[
"BSD-2-Clause-FreeBSD"
] | 683
|
2015-01-03T09:55:02.000Z
|
2022-03-31T07:18:15.000Z
|
xlsxwriter/test/worksheet/test_worksheet05.py
|
jmcnamara/test_py_github_actions
|
d445d5d98b038b63453dd70c9c1a9ca1b325cb47
|
[
"BSD-2-Clause-FreeBSD"
] | 636
|
2015-01-05T01:57:08.000Z
|
2022-03-25T18:42:41.000Z
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with strings in cells."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
# Write some strings.
worksheet.write_string(0, 0, 'Foo')
worksheet.write_string(2, 0, 'Bar')
worksheet.write_string(2, 3, 'Baz')
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:D3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:4">
<c r="A1" t="s">
<v>0</v>
</c>
</row>
<row r="3" spans="1:4">
<c r="A3" t="s">
<v>1</v>
</c>
<c r="D3" t="s">
<v>2</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 32.338235
| 171
| 0.482037
|
93350dabc6d5b32879d8f37d039b2b2a839d1408
| 2,003
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/portal_global_setting.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/portal_global_setting.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/portal_global_setting.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: portal_global_setting
short_description: Resource module for Portal Global Setting
description:
- Manage operation update of the resource Portal Global Setting.
version_added: '1.0.0'
extends_documentation_fragment:
- cisco.ise.module
author: Rafael Campos (@racampos)
options:
customization:
description: Allowed values - HTML, - HTMLANDJAVASCRIPT.
type: str
id:
description: Portal Global Setting's id.
type: str
requirements:
- ciscoisesdk >= 1.1.0
- python >= 3.5
seealso:
# Reference by Internet resource
- name: Portal Global Setting reference
description: Complete reference of the Portal Global Setting object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Update by id
cisco.ise.portal_global_setting:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
state: present
customization: string
id: string
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: dict
sample: >
{
"id": "string",
"customization": "string",
"link": {
"rel": "string",
"href": "string",
"type": "string"
}
}
ise_update_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
version_added: "1.1.0"
type: dict
sample: >
{
"UpdatedFieldsList": {
"updatedField": {
"field": "string",
"oldValue": "string",
"newValue": "string"
},
"field": "string",
"oldValue": "string",
"newValue": "string"
}
}
"""
| 24.426829
| 92
| 0.651523
|
edfc7ece408ae46fd8045912656e1b3747cea708
| 105
|
py
|
Python
|
blogsNewsModule/apps.py
|
adityakekare/NewsAPIDjango
|
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
|
[
"MIT"
] | 1
|
2020-10-14T17:13:45.000Z
|
2020-10-14T17:13:45.000Z
|
blogsNewsModule/apps.py
|
adityakekare/NewsAPIDjango
|
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
|
[
"MIT"
] | null | null | null |
blogsNewsModule/apps.py
|
adityakekare/NewsAPIDjango
|
47ff0c69e3d48c10a257c8221916ccd2fdaf9abb
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BlogsnewsmoduleConfig(AppConfig):
name = 'blogsNewsModule'
| 17.5
| 39
| 0.790476
|
945b428c3dace5fc559da30e07b3607751dcb985
| 3,371
|
py
|
Python
|
src/tests/sys/netinet6/frag6/frag6_02.py
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
src/tests/sys/netinet6/frag6/frag6_02.py
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
src/tests/sys/netinet6/frag6/frag6_02.py
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#!/usr/bin/env python
#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019 Netflix, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
import argparse
import scapy.all as sp
import socket
import sys
from sniffer import Sniffer
from time import sleep
def check_icmp6_error(args, packet):
ip6 = packet.getlayer(sp.IPv6)
if not ip6:
return False
oip6 = sp.IPv6(src=args.src[0], dst=args.to[0])
if ip6.dst != oip6.src:
return False
icmp6 = packet.getlayer(sp.ICMPv6ParamProblem)
if not icmp6:
return False
# ICMP6_PARAMPROB_HEADER 0
if icmp6.code != 0:
return False
# Should we check the payload as well?
# We are running in a very isolated environment and nothing else
# should trigger an ICMPv6 Param Prob so leave it.
#icmp6.display()
return True
def main():
parser = argparse.ArgumentParser("frag6.py",
description="IPv6 fragementation test tool")
parser.add_argument('--sendif', nargs=1,
required=True,
help='The interface through which the packet will be sent')
parser.add_argument('--recvif', nargs=1,
required=True,
help='The interface on which to check for the packet')
parser.add_argument('--src', nargs=1,
required=True,
help='The source IP address')
parser.add_argument('--to', nargs=1,
required=True,
help='The destination IP address')
parser.add_argument('--debug',
required=False, action='store_true',
help='Enable test debugging')
args = parser.parse_args()
# Start sniffing on recvif
sniffer = Sniffer(args, check_icmp6_error)
########################################################################
#
# A single start fragment with payload length not % 8.
#
# A: Error handling in code.
# R: ICMPv6 param problem.
#
data = "6" * 1287
ip6f01 = sp.Ether() / \
sp.IPv6(src=args.src[0], dst=args.to[0]) / \
sp.IPv6ExtHdrFragment(offset=0, m=1, id=5) / \
sp.UDP(dport=3456, sport=6543) / \
data
if args.debug :
ip6f01.display()
sp.sendp(ip6f01, iface=args.sendif[0], verbose=False)
sleep(0.10)
sniffer.setEnd()
sniffer.join()
if not sniffer.foundCorrectPacket:
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| 30.645455
| 76
| 0.719371
|
24f117ff3d75ff91ae9a2a9cd2d8f1fa088a9e24
| 4,045
|
py
|
Python
|
SC101_Assignments/SC101_Assignment6/boggle.py
|
JIllchen487/StanCode101
|
8025dd4e4cf0cb3d14d7314ef4ea6dfff3e5b5cc
|
[
"MIT"
] | null | null | null |
SC101_Assignments/SC101_Assignment6/boggle.py
|
JIllchen487/StanCode101
|
8025dd4e4cf0cb3d14d7314ef4ea6dfff3e5b5cc
|
[
"MIT"
] | null | null | null |
SC101_Assignments/SC101_Assignment6/boggle.py
|
JIllchen487/StanCode101
|
8025dd4e4cf0cb3d14d7314ef4ea6dfff3e5b5cc
|
[
"MIT"
] | null | null | null |
"""
File: boggle.py
Name:
----------------------------------------
TODO:
"""
import time
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
dic = []
def main():
"""
TODO:
"""
start = time.time()
####################
global dic
dic = read_dictionary()
# print(dic)
l1 = input_row('1 row of letters: ')
# l1 = ['f', 'y', 'c', 'l']
l2 = input_row('2 row of letters: ')
# l2 = ['i', 'o', 'm', 'g']
l3 = input_row('3 row of letters: ')
# l3 = ['o', 'r', 'i', 'l']
l4 = input_row('4 row of letters: ')
# l4 = ['h', 'j', 'h', 'u']
boggle_board = create_board([l1, l2, l3, l4])
find_words(boggle_board)
####################
end = time.time()
print('----------------------------------')
print(f'The speed of your boggle algorithm: {end - start} seconds.')
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
with open(FILE, 'r') as f:
for word in f:
dic.append(word[:-1])
return dic
def input_row(pmp):
a = input(pmp)
lst = a.split()
for i in lst:
if (not i.isalpha()) or len(i) != 1:
print('Illegal Input')
return
return lst
def create_board(list_of_lists):
board = {}
for i in range(4):
lst = list_of_lists[i]
for j in range(4):
board[(j, i)] = lst[j]
return board
def find_words(board):
"""
:param board: (dictionary) A dictionary that is constructed by inputs of the boggle board
    :return: does not return anything but prints out all the found words and their total count
"""
chosen = []
for x in range(4):
for y in range(4):
coordinates = (x, y)
forming_word = board[coordinates]
visited = [coordinates]
find_words_helper(coordinates, board, chosen, forming_word, visited)
print(len(chosen))
def find_words_helper(coordinates, board, chosen_words, forming_word, visited):
"""
:param coordinates: (tuple) the pivot point to start with when searching for words
:param board: (dictionary) A dictionary that is constructed by inputs of the boggle board
    :param chosen_words: (list) contains all the chosen vocab found so far
    :param forming_word: (str) the word formed so far along the current search path
    :param visited: (list) contains all the coordinates of chosen neighbors
    :return: does not return anything but prints out each newly found word
"""
global dic
neighbors = neighbor(coordinates, board)
# Base Case
if forming_word in dic and len(forming_word) >= 4:
if forming_word not in chosen_words:
print('Found: ', forming_word)
chosen_words.append(forming_word)
# Choose
for neighbor_coordinates in neighbors:
if neighbor_coordinates not in visited:
new_word = forming_word + neighbors[neighbor_coordinates]
if has_prefix(new_word):
visited.append(neighbor_coordinates)
# Explore
find_words_helper(neighbor_coordinates, board, chosen_words, new_word, visited)
# Un-choose
visited.pop()
def neighbor(coordinates, board):
neighbors = {}
x = coordinates[0]
y = coordinates[1]
for i in range(-1, 2):
neighbor_x = x + i
if 0 <= neighbor_x <= 3:
for j in range(-1, 2):
neighbor_y = y + j
if 0 <= neighbor_y <= 3 and (i, j) != (0, 0):
neighbors[(neighbor_x, neighbor_y)] = board[(neighbor_x, neighbor_y)]
return neighbors
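# Illustrative: for the corner cell (0, 0), neighbor() returns the three
# on-board neighbours (0, 1), (1, 0) and (1, 1), each mapped to its letter.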
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
for ele in dic:
if ele.startswith(sub_s):
return True
return False
if __name__ == '__main__':
main()
| 28.286713
| 99
| 0.576267
|
9a3c54ee09fd8abced82afa7e500c813a3dfc431
| 5,190
|
py
|
Python
|
libs/labelFile.py
|
PaKyong/labelImg
|
8a5f08d624b4bb797aa4fad445e4fd7bb23c58cf
|
[
"MIT"
] | 11
|
2018-10-17T08:57:27.000Z
|
2020-08-07T02:43:31.000Z
|
libs/labelFile.py
|
PaKyong/labelImg
|
8a5f08d624b4bb797aa4fad445e4fd7bb23c58cf
|
[
"MIT"
] | 25
|
2020-09-25T22:33:07.000Z
|
2022-03-12T00:15:27.000Z
|
libs/labelFile.py
|
PaKyong/labelImg
|
8a5f08d624b4bb797aa4fad445e4fd7bb23c58cf
|
[
"MIT"
] | 11
|
2019-11-02T01:31:20.000Z
|
2021-08-15T12:49:27.000Z
|
# Copyright (c) 2016 Tzutalin
# Create by TzuTaLin <tzu.ta.lin@gmail.com>
try:
from PyQt5.QtGui import QImage
except ImportError:
from PyQt4.QtGui import QImage
from base64 import b64encode, b64decode
from libs.pascal_voc_io import PascalVocWriter
from libs.yolo_io import YOLOWriter
from libs.pascal_voc_io import XML_EXT
import os.path
import sys
class LabelFileError(Exception):
pass
class LabelFile(object):
# It might be changed as window creates. By default, using XML ext
# suffix = '.lif'
suffix = XML_EXT
def __init__(self, filename=None):
self.shapes = ()
self.imagePath = None
self.imageData = None
self.verified = False
def savePascalVocFormat(self, filename, shapes, imagePath, imageData,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
# Read from file path because self.imageData might be empty if saving to
# Pascal format
image = QImage()
image.load(imagePath)
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = PascalVocWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)
writer.save(targetFile=filename)
return
def saveYoloFormat(self, filename, shapes, imagePath, imageData, classList,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
# Read from file path because self.imageData might be empty if saving to
# Pascal format
image = QImage()
image.load(imagePath)
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = YOLOWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)
writer.save(targetFile=filename, classList=classList)
return
def toggleVerify(self):
self.verified = not self.verified
    ''' disabled: legacy json (.lif) load/save, kept for reference
def load(self, filename):
import json
with open(filename, 'rb') as f:
data = json.load(f)
imagePath = data['imagePath']
imageData = b64decode(data['imageData'])
lineColor = data['lineColor']
fillColor = data['fillColor']
shapes = ((s['label'], s['points'], s['line_color'], s['fill_color'])\
for s in data['shapes'])
# Only replace data after everything is loaded.
self.shapes = shapes
self.imagePath = imagePath
self.imageData = imageData
self.lineColor = lineColor
self.fillColor = fillColor
def save(self, filename, shapes, imagePath, imageData, lineColor=None, fillColor=None):
import json
with open(filename, 'wb') as f:
json.dump(dict(
shapes=shapes,
lineColor=lineColor, fillColor=fillColor,
imagePath=imagePath,
imageData=b64encode(imageData)),
f, ensure_ascii=True, indent=2)
'''
@staticmethod
def isLabelFile(filename):
fileSuffix = os.path.splitext(filename)[1].lower()
return fileSuffix == LabelFile.suffix
@staticmethod
def convertPoints2BndBox(points):
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# Martin Kersner, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
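    # Illustrative: convertPoints2BndBox([(10, 20), (30, 5)]) -> (10, 5, 30, 20);
    # coordinates below 1 are clamped to 1 to avoid zero-valued bounding boxes.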
| 35.306122
| 91
| 0.581696
|
b16e02eebc00ebfd9713d5d3be84c9cedd91651f
| 411
|
py
|
Python
|
old_django_malliva/communications/models.py
|
olubiyiontheweb/malliva
|
b212e6b359eed54c92533f0a02afe3c0042150e2
|
[
"MIT"
] | null | null | null |
old_django_malliva/communications/models.py
|
olubiyiontheweb/malliva
|
b212e6b359eed54c92533f0a02afe3c0042150e2
|
[
"MIT"
] | null | null | null |
old_django_malliva/communications/models.py
|
olubiyiontheweb/malliva
|
b212e6b359eed54c92533f0a02afe3c0042150e2
|
[
"MIT"
] | 1
|
2021-07-19T12:15:52.000Z
|
2021-07-19T12:15:52.000Z
|
from django.db import models
from django.contrib.auth import get_user_model
from django.db.models.deletion import DO_NOTHING
User = get_user_model()
# Create your models here.
class Message(models.Model):
id = models.BigAutoField(primary_key=True)
initiated_by = models.ForeignKey(User, on_delete=DO_NOTHING, blank=False)
# received_by = models.ForeignKey(User, on_delete=DO_NOTHING, blank=False)
| 34.25
| 78
| 0.788321
|
7052b592186ce90ac9ede26c6b8d3fc11ce5c40f
| 5,142
|
py
|
Python
|
CondTools/Ecal/test/tools/inspectEcal.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
CondTools/Ecal/test/tools/inspectEcal.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
CondTools/Ecal/test/tools/inspectEcal.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
#! /usr/bin/env python
from __future__ import print_function
import os,sys, DLFCN,getopt
sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)
from CondCore.Utilities import iovInspector as inspect
from pluginCondDBPyInterface import *
import pluginCondDBPyInterface as CondDB
from ROOT import TCanvas,TH1F, TH2F,TFile
def unhashEBDetId(i):
pseudo_eta= i/360 - 85;
ieta=0
if pseudo_eta <0 :
ieta = pseudo_eta
else :
ieta = pseudo_eta +1
iphi = i%360 +1
return ieta,iphi
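# Illustrative (this script runs under Python 2, so i/360 is integer division):
#   unhashEBDetId(0)   -> (-85, 1)
#   unhashEBDetId(360) -> (-84, 1)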
def setWhat(w,ret) :
for key in ret.keys():
_val = ret[key]
if (isinstance(_val, type([]))) :
_vi = CondDB.VInt()
for i in _val :
_vi.append(i)
exec ('w.set_'+key+'(_vi)')
else :
exec ('w.set_'+key+'(w.'+key+'().'+ret[key]+')')
return w
def usage():
print("inspectEcal -c [connectstring] -P [authpath] -t [tag] -f [outfile] -l -h")
print(" dump records in xml")
print(" -l: list tags and exit")
print(" -f [file] : dump to file")
print(" -p plot distribution ")
print(" -q compare [tag] ")
print(" -r reference [tag] ")
print(" -m draw map")
print(" -h : help")
try:
opts, args = getopt.getopt(sys.argv[1:], "c:P:t:f:lhpq:r:m", ["connect=","authpath=","tag","file","listtags","help","plot","compare","reference","map"])
if not len(opts):
usage()
sys.exit(0)
except getopt.GetoptError:
#* print help information and exit:*
usage()
sys.exit(2)
dbName = "oracle://cms_orcoff_prod/CMS_COND_31X_ECAL"
authpath= "/afs/cern.ch/cms/DB/conddb"
tag='EcalIntercalibConstants_mc'
do_list_tags= 0
dump_to_file =0
outfile=""
do_plot=0
do_compare=0
compare_tag=""
reference_tag=""
drawmap=0
for opt,arg in opts:
if opt in ("-c","--connect"):
try:
            dbName = arg
except Exception as er :
print(er)
if opt in ("-P","--authpath"):
try:
            authpath = arg
except Exception as er :
print(er)
if opt in ("-t","--tag"):
tag=arg
if opt in ("-l","--listtags"):
do_list_tags= 1
if opt in ("-f","--file"):
dump_to_file= 1
outfile=arg
if opt in ("-p","--plot"):
do_plot= 1
if opt in ("-q","--compare"):
do_compare=1
compare_tag=arg
if opt in ("-r","--reference"):
reference_tag=arg
if opt in ("-m","--map"):
drawmap=1
if opt in ("-h","--help"):
usage()
sys.exit(0)
a = FWIncantation()
rdbms = RDBMS(authpath)
db = rdbms.getDB(dbName)
if do_list_tags :
tags=db.allTags()
for tag in tags.split():
print(tag)
sys.exit(0)
try :
iov = inspect.Iov(db,tag)
print("===iov list ===")
iovlist=iov.list()
print(iovlist)
print("===iov summaries ===")
print(iov.summaries())
print("===payload dump ===")
for p in iovlist:
payload=inspect.PayLoad(db,p[0])
#print payload.summary()
if dump_to_file:
print("Dumping to file:", outfile)
out = open(outfile,"w")
print(payload, file=out)
else:
#print payload
if drawmap:
payload.plot("plot","",[],[])
if do_plot:
exec('import '+db.moduleName(tag)+' as Plug')
#what = {'how':'singleChannel','which': [0,1,2]}
what = {'how':'barrel'}
w = setWhat(Plug.What(),what)
ex = Plug.Extractor(w)
for elem in db.iov(tag).elements :
p = Plug.Object(elem)
p.extract(ex)
v = [i for i in ex.values()]
# print v
histo=TH1F("h","h",100,-2,2)
for c in v :
histo.Fill(c)
f=TFile("f.root","recreate")
histo.Write()
if do_compare:
exec('import '+db.moduleName(tag)+' as Plug')
what = {'how':'barrel'}
w = setWhat(Plug.What(),what)
ex = Plug.Extractor(w)
for elem in db.iov(reference_tag).elements :
p = Plug.Object(elem)
p.extract(ex)
coeff_1 = [i for i in ex.values()]
for elem in db.iov(compare_tag).elements :
p = Plug.Object(elem)
p.extract(ex)
coeff_2 = [i for i in ex.values()]
can=TCanvas("c","c")
histo = TH1F("h","h",100,-2,2)
for i,c in enumerate(coeff_1):
histo.Fill(c-coeff_2[i])
histo.Draw()
can.SaveAs("h.svg")
can2=TCanvas("cc","cc")
histo2=TH2F("hh","hh",171,-85,86,360,1,361)
for i,c in enumerate(coeff_1):
factor = c/coeff_2[i]
ieta,iphi= unhashEBDetId(i)
histo2.Fill(ieta,iphi,factor)
histo2.SetStats(0)
histo2.Draw("colz")
can2.SaveAs("h2.svg")
except Exception as er :
print(er)
| 25.455446
| 156
| 0.508751
|
1f7006bd3b5a6d1addef7364850c8a6336d4d5f8
| 1,085
|
py
|
Python
|
scripts/speech/audio-features-cat.py
|
zhrlove/seq2seq_attention_1
|
6535820c9381467508ba8dfeb8971173b3998510
|
[
"Apache-2.0"
] | 1
|
2019-01-02T15:57:32.000Z
|
2019-01-02T15:57:32.000Z
|
scripts/speech/audio-features-cat.py
|
zhrlove/seq2seq_attention_1
|
6535820c9381467508ba8dfeb8971173b3998510
|
[
"Apache-2.0"
] | null | null | null |
scripts/speech/audio-features-cat.py
|
zhrlove/seq2seq_attention_1
|
6535820c9381467508ba8dfeb8971173b3998510
|
[
"Apache-2.0"
] | 2
|
2020-08-02T18:28:54.000Z
|
2021-07-30T07:28:40.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import struct
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('inputs', nargs='+')
parser.add_argument('output')
args = parser.parse_args()
with open(args.output, 'wb') as output_file:
lines = 0
dim = None
for filename in args.inputs:
with open(filename, 'rb') as input_file:
header = input_file.read(8)
lines_, dim_ = struct.unpack('ii', header)
lines += lines_
if dim is not None and dim_ != dim:
raise Exception('incompatible dimensions')
dim = dim_
output_file.write(struct.pack('ii', lines, dim))
for filename in args.inputs:
with open(filename, 'rb') as input_file:
header = input_file.read(8)
lines_, dim_ = struct.unpack('ii', header)
for _ in range(lines_):
x = input_file.read(4)
frames, = struct.unpack('i', x)
output_file.write(x)
output_file.write(input_file.read(4 * frames * dim))
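# Usage sketch (file names are hypothetical):
#   python audio-features-cat.py feats.part1.bin feats.part2.bin feats.all.bin
# Each input starts with an 8-byte header (int32 line count, int32 feature dim);
# the output header sums the line counts and keeps the shared dimension.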
| 29.324324
| 68
| 0.581567
|
5bc5d289b252759bc894df219be54dfcc74df5c5
| 2,389
|
py
|
Python
|
annoyed-alligators/socl_media/urls.py
|
nishithshowri006/summer-code-jam-2020
|
a38a7c9c5e2578a803e18640a10c7d4ab96753e6
|
[
"MIT"
] | null | null | null |
annoyed-alligators/socl_media/urls.py
|
nishithshowri006/summer-code-jam-2020
|
a38a7c9c5e2578a803e18640a10c7d4ab96753e6
|
[
"MIT"
] | null | null | null |
annoyed-alligators/socl_media/urls.py
|
nishithshowri006/summer-code-jam-2020
|
a38a7c9c5e2578a803e18640a10c7d4ab96753e6
|
[
"MIT"
] | 1
|
2021-07-10T14:23:55.000Z
|
2021-07-10T14:23:55.000Z
|
"""socl_media URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from socl_media.apps.users import views as users_views
from socl_media.apps.chat.views import ChatListView
urlpatterns = [
path('', include('socl_media.apps.feed.urls')),
path('login/', auth_views.LoginView.as_view(
template_name='users/login.html'), name="login"),
path('signup/', users_views.signup, name="signup"),
path('logout/', auth_views.LogoutView.as_view(), name="logout"),
path('password-reset/', auth_views.PasswordResetView.as_view(
template_name='users/password_reset.html'), name="password_reset"),
path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(
template_name='users/password_reset_done.html'),
name="password_reset_done"),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='users/password_reset_confirm.html'),
name="password_reset_confirm"),
path('password-reset-complete',
auth_views.PasswordResetCompleteView.as_view(
template_name='users/password_reset_complete.html'),
name="password_reset_complete"),
path('password-change/', auth_views.PasswordChangeView.as_view(
template_name='users/password_change_form.html'),
name="password_change"),
path('password-change/done', auth_views.PasswordChangeDoneView.as_view(
template_name='users/password_change_done.html'),
name="password_change_done"),
path('admin/', admin.site.urls),
path('terminal/', include('socl_media.apps.terminal.urls')),
path('message-box/', ChatListView.as_view(), name='message-box')
]
| 45.942308
| 77
| 0.716199
|
3a480926e9a613c3913c9f6812668572b750cae7
| 5,871
|
py
|
Python
|
pyscisci/datasource/readwrite.py
|
kishorevasan/pyscisci
|
f067fa2c9feba457042aa86546270d81a4844e7e
|
[
"MIT"
] | null | null | null |
pyscisci/datasource/readwrite.py
|
kishorevasan/pyscisci
|
f067fa2c9feba457042aa86546270d81a4844e7e
|
[
"MIT"
] | null | null | null |
pyscisci/datasource/readwrite.py
|
kishorevasan/pyscisci
|
f067fa2c9feba457042aa86546270d81a4844e7e
|
[
"MIT"
] | null | null | null |
import os
import sys
import pandas as pd
import numpy as np
from unidecode import unidecode
import html
# determine if we are loading from a jupyter notebook (to make pretty progress bars)
if 'ipykernel' in sys.modules:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
from pyscisci.utils import isin_sorted
def load_int(v):
try:
return int(v)
except ValueError:
return None
def load_float(v):
try:
return float(v)
except ValueError:
return None
def load_html_str(s):
if s is None:
return ''
else:
return unidecode(html.unescape(s)).strip()
def load_xml_text(root_element, default=''):
if root_element is None or len(root_element) == 0:
try:
return root_element.text
except:
return default
else:
return root_element[0].text
def load_preprocessed_data(dataname, path2database, columns = None, filter_dict=None, duplicate_subset=None,
duplicate_keep='last', dropna=None, keep_source_file=False, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the preprocessed DataFrame from a preprocessed directory.
Parameters
----------
:param dataname : str
The type of preprocessed data to load.
:param path2database : str
The path to the database directory.
:param columns : list, default None
Load only this subset of columns
:param filter_dict : dict, default None
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
:param keep_source_file : bool, default False
Keep track of the source file the data was loaded from.
:param prefunc2apply : callable, default None
A function to apply to each of the sub-DataFrames as they are loaded before filtering.
:param postfunc2apply : callable, default None
A function to apply to each of the sub-DataFrames as they are loaded after filtering.
Returns
-------
DataFrame
dataname DataFrame.
"""
path2files = os.path.join(path2database, dataname)
if not os.path.exists(path2files):
# TODO: make a real warning
raise NotImplementedError("First preprocess the raw data.")
if isinstance(columns, str):
columns = [columns]
if isinstance(dropna, str):
dropna = [dropna]
if isinstance(duplicate_subset, str):
duplicate_subset = [duplicate_subset]
if isinstance(filter_dict, dict):
filter_dict = {isinkey:np.sort(isinlist) for isinkey, isinlist in filter_dict.items()}
FileNumbers = sorted([int(fname.replace(dataname, '').split('.')[0]) for fname in os.listdir(path2files) if dataname in fname])
desc=''
if isinstance(show_progress, str):
desc = show_progress
data_df = []
for ifile in tqdm(FileNumbers, desc=desc, leave=True, disable=not show_progress):
fname = os.path.join(path2files, dataname+"{}.hdf".format(ifile))
subdf = pd.read_hdf(fname, mode = 'r')
if callable(prefunc2apply):
subdf = prefunc2apply(subdf)
if isinstance(columns, list):
subdf = subdf[columns]
if isinstance(dropna, list):
subdf.dropna(subset = dropna, inplace = True, how = 'any')
if isinstance(filter_dict, dict):
for isinkey, isinlist in filter_dict.items():
subdf = subdf[isin_sorted(subdf[isinkey], isinlist)]
if isinstance(duplicate_subset, list):
subdf.drop_duplicates(subset = duplicate_subset, keep = duplicate_keep, inplace = True)
if keep_source_file:
subdf['filetag'] = ifile
if callable(postfunc2apply):
postfunc2apply(subdf)
data_df.append(subdf)
data_df = pd.concat(data_df)
if isinstance(duplicate_subset, list):
data_df.drop_duplicates(subset = duplicate_subset, keep = duplicate_keep, inplace = True)
data_df.name = dataname
return data_df
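# Usage sketch (the dataname, path and column names are illustrative, not a
# documented schema):
#   pub_df = load_preprocessed_data('publication', '/path/to/database',
#                                   columns=['PublicationId', 'Year'],
#                                   filter_dict={'Year': [2018, 2019]},
#                                   show_progress=True)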
def append_to_preprocessed_df(newdf, path2database, preprocessname, startcount=0):
"""
Append the newdf to the preprocessed DataFrames from a preprocessed directory.
Parameters
----------
:param newdf : DataFrame
The new DataFrame to save on the processed data. It must have at least one column in common.
:param path2database : str
The path to the database directory.
:param preprocessname : str
The type of preprocessed data to which we append the new data.
"""
path2files = os.path.join(path2database, preprocessname)
Nfiles = sum(preprocessname in fname for fname in os.listdir(path2files))
for ifile in range(Nfiles):
datadf = pd.read_hdf(os.path.join(path2files, preprocessname + '{}.hdf'.format(ifile+startcount)))
datadf = datadf.merge(newdf, how = 'left')
datadf.to_hdf(os.path.join(path2files, preprocessname + '{}.hdf'.format(ifile+startcount)), key = preprocessname, mode = 'w')
| 32.436464
| 151
| 0.654744
|
84da9ccbf194d3b94b1df994e24d7f640e48f2b2
| 160
|
py
|
Python
|
examples/models/create_supervised_opf.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 26
|
2018-04-24T20:16:18.000Z
|
2022-03-09T14:03:28.000Z
|
examples/models/create_supervised_opf.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 4
|
2020-12-26T14:57:18.000Z
|
2022-03-30T02:34:18.000Z
|
examples/models/create_supervised_opf.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 16
|
2019-05-20T15:41:56.000Z
|
2022-03-23T17:59:53.000Z
|
from opfython.models import SupervisedOPF
# Creates a SupervisedOPF instance
opf = SupervisedOPF(distance='log_squared_euclidean', pre_computed_distance=None)
| 32
| 81
| 0.85
|
654b8fd60ba8f7734cf55bc1951fc3401b19d545
| 6,159
|
py
|
Python
|
jyotisha/custom_transliteration.py
|
vedatemple/jyotisha
|
02ff8530c567ad534905300d63b17da177e90226
|
[
"MIT"
] | null | null | null |
jyotisha/custom_transliteration.py
|
vedatemple/jyotisha
|
02ff8530c567ad534905300d63b17da177e90226
|
[
"MIT"
] | null | null | null |
jyotisha/custom_transliteration.py
|
vedatemple/jyotisha
|
02ff8530c567ad534905300d63b17da177e90226
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
import swisseph as swe
import sys
from math import floor
from indic_transliteration import xsanscript as sanscript
import logging
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
def romanise(iast_text):
swapTable = {'ā': 'a', 'Ā': 'A', 'ī': 'i', 'ū': 'u', 'ṅ': 'n', 'ṇ': 'n',
'ḍ': 'd', 'ṭ': 't', 'ṃ': 'm', 'ñ': 'n', 'ṛ': 'ri', 'ś': 'sh',
'Ś': 'Sh', 'ṣ': 'sh', 'Ṣ': 'Sh', 'ḥ': '', '-': '-', ' ': '-'}
roman_text = ''
for char in iast_text:
if char in swapTable:
roman_text += swapTable[char]
else:
roman_text += char
return roman_text.lower()
def tr(text, scr, titled=True):
    # The titled flag mainly exists so it can be set to False to avoid TitleCasing IAST shlokas.
if scr == 'hk':
scr = sanscript.HK
if text == '':
return ''
text = text.replace('~', '##~##') # Simple fix to prevent transliteration of ~
text_bits = text.split('|')
transliterated_text = []
if titled:
for t in text_bits:
t = t.rstrip('#~0123456789 ')
if t[:3] == 'ta:':
# Force Tamil!
if scr == sanscript.DEVANAGARI:
scr = sanscript.TAMIL
t = t[3:]
if scr == sanscript.TAMIL:
transliterated_text.append('\\tamil{%s}' %
sanscript.transliterate(data=t, _from=sanscript.HK, _to=scr).replace('C','Ch').replace('c','ch').title())
else:
transliterated_text.append(
sanscript.transliterate(data=t, _from=sanscript.HK, _to=scr).replace('C','Ch').replace('c','ch').title())
else:
if t.find('RIGHTarrow') == -1:
transliterated_text.append(
sanscript.transliterate(data=t, _from=sanscript.HK, _to=scr).replace('C','Ch').replace('c','ch').title())
else:
[txt, t1, arrow, t2] = t.split('\\')
transliterated_text.append(
'\\'.join([sanscript.transliterate(data=txt, _from=sanscript.HK, _to=scr).replace('C','Ch').replace('c','ch').title(),
t1, arrow, t2]))
else:
for t in text_bits:
t = t.rstrip('~0123456789 ')
if t[:3] == 'ta:':
# Force Tamil!
if scr == sanscript.DEVANAGARI:
scr = sanscript.TAMIL
t = t[3:]
transliterated_text.append(
sanscript.transliterate(data=t, _from=sanscript.HK, _to=scr).replace('C','Ch').replace('c','ch').strip("{}").title())
else:
if t.find('RIGHTarrow') == -1:
transliterated_text.append(sanscript.transliterate(data=t, _from=sanscript.HK, _to=scr))
else:
[txt, t1, arrow, t2] = t.split('\\')
transliterated_text.append(
'\\'.join([sanscript.transliterate(txt, _from=sanscript.HK, _to=scr),
t1, arrow, t2]))
return '|'.join(transliterated_text)
def sexastr2deci(sexa_str):
"""Converts as sexagesimal string to decimal
Converts a given sexagesimal string to its decimal value
Args:
A string encoding of a sexagesimal value, with the various
components separated by colons
Returns:
A decimal value corresponding to the sexagesimal string
Examples:
>>> sexastr2deci('15:30:00')
15.5
>>> sexastr2deci('-15:30:45')
-15.5125
"""
if sexa_str[0] == '-':
sgn = -1.0
dms = sexa_str[1:].split(':') # dms = degree minute second
else:
sgn = 1.0
dms = sexa_str.split(':')
decival = 0
for i in range(0, len(dms)):
decival = decival + float(dms[i]) / (60.0 ** i)
return decival * sgn
def revjul(jd, formatstr='%4d-%02d-%02d %02d:%02d:%02d', tz_off=0):
"""Returns a more human readable revjul compared to swe
Converts a given jd (float) to a tuple [y,m,d,h,mm,ss]
Args:
A float corresponding to a Julian day
Returns:
        A formatted date-time string (via formatstr), or the tuple (year, month, day, hour, minute, second) when formatstr is None
Examples:
>>> revjul(2444961.7125, None)
(1981, 12, 23, 5, 6, 0)
>>> revjul(2444961.7125)
'1981-12-23 05:06:00'
"""
if jd is None:
return None
year, month, day, h_float = swe.revjul(jd + tz_off / 24.0)
hour = floor(h_float)
h_float = (h_float - hour) * 60
minute = floor(h_float)
h_float = (h_float - minute) * 60
second = int(round(h_float))
if second == 60:
minute += 1
second = 0
if minute == 60:
hour += 1
minute = 0
    if hour == 24:
        hour = 0
        year, month, day, _h = swe.revjul(jd + (tz_off + 1) / 24.0)
if formatstr is None:
return (year, month, day, hour, minute, second)
else:
return (formatstr % (year, month, day, hour, minute, second))
def print_lat_lon(lat, lon):
"""Returns a formatted string for a latitude and longitude
    Returns a formatted string for latitude and longitude, given decimal
    degrees (southern latitudes and western longitudes are negative)
    Args:
        float lat
        float lon
    Returns:
        string corresponding to the formatted latitude and longitude
    Examples:
        >>> print_lat_lon(13.09, 80.27)  # Chennai
        '13.090000°N, 80.270000°E'
        >>> print_lat_lon(37.3999, -122.1428)  # Palo Alto
        '37.399900°N, 122.142800°W'
        >>> print_lat_lon(1, -1)
        '1.000000°N, 1.000000°W'
"""
if lat < 0:
lat = -lat
lat_suffix = 'S'
else:
lat_suffix = 'N'
if lon < 0:
lon = -lon
lon_suffix = 'W'
else:
lon_suffix = 'E'
return '%.6f°%s, %.6f°%s' % (lat, lat_suffix, lon, lon_suffix)
def longitudeToRightAscension(longitude):
return (360 - longitude) / 360 * 24
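# Minimal demo of the helpers above (illustrative values only; not part of the
# original module), run only when this file is executed directly.
if __name__ == '__main__':
    logging.debug(tr('rAmaH', sanscript.DEVANAGARI))
    logging.debug(sexastr2deci('15:30:00'))
    logging.debug(revjul(2444961.7125))
    logging.debug(print_lat_lon(13.09, 80.27))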
| 29.753623
| 142
| 0.524436
|
b934ce455e4d77c9d7a39876a369462839e6ae20
| 2,586
|
py
|
Python
|
housepricesadv/data/make_dataset.py
|
chritter/housepricesadv
|
b1d17a7aa962855c34288874aaedb1abc7f7f58d
|
[
"MIT"
] | null | null | null |
housepricesadv/data/make_dataset.py
|
chritter/housepricesadv
|
b1d17a7aa962855c34288874aaedb1abc7f7f58d
|
[
"MIT"
] | null | null | null |
housepricesadv/data/make_dataset.py
|
chritter/housepricesadv
|
b1d17a7aa962855c34288874aaedb1abc7f7f58d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import click
from dotenv import find_dotenv, load_dotenv
from sklearn import model_selection
import pandas as pd
def create_fold_file(file_in, file_out, n_splits=5, stratify=None):
"""
takes training file (e.g. train.csv) and produces new file
with additional column kfold, indicating the validation folds!
Parameters
----------
    file_in : str
        Path to the input training CSV file.
    file_out : str
        Path where the CSV with the added ``kfold`` column is written.
    n_splits : int, optional
        Number of cross-validation folds, by default 5
    stratify : array-like, optional
        Target values used for stratified splitting; plain KFold is used when None, by default None
    Examples
    --------
    >>> create_fold_file("data/raw/train.csv", "data/processed/train_folds.csv")
"""
train = pd.read_csv(file_in,
keep_default_na=False, na_values=[""])
    # ensure training data is shuffled; do this before creating the folds
train = train.sample(frac=1, random_state=42).reset_index(drop=True)
if stratify:
folder = model_selection.StratifiedKFold(n_splits=n_splits)
else:
folder = model_selection.KFold(n_splits=n_splits)
for n_fold, (_, valid_idx) in enumerate(folder.split(X=train, y=stratify)):
train.loc[valid_idx, "kfold"] = n_fold
train['kfold'] = train['kfold'].astype(int)
#fname = Path(file_in).stem
# suffix = Path(file_in).suffix
train.to_csv(file_out, index=False)
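# Hedged illustration of the fold assignment above on synthetic in-memory data
# (not part of the original module; the real entry point below works on CSV
# paths instead).
def _example_fold_assignment(n_rows=10, n_splits=5):
    df = pd.DataFrame({"row": range(n_rows)})
    folder = model_selection.KFold(n_splits=n_splits)
    for n_fold, (_, valid_idx) in enumerate(folder.split(df)):
        df.loc[valid_idx, "kfold"] = n_fold
    return df["kfold"].astype(int).tolist()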
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
"""
Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
Parameters
----------
    input_filepath : str
        Path to the raw training CSV file.
    output_filepath : str
        Path where the processed CSV with fold assignments is written.
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
create_fold_file(input_filepath, output_filepath,
n_splits=5, stratify=None)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| 28.733333
| 79
| 0.656226
|
04e18a5cb48b4a3d29f52f04951c0fde0d43df33
| 4,780
|
py
|
Python
|
schemas/data.py
|
CIMAC-CIDC/cidc-ingestion-api
|
1877907119efd30c7698ecc7c5da84f877ba02d7
|
[
"MIT"
] | null | null | null |
schemas/data.py
|
CIMAC-CIDC/cidc-ingestion-api
|
1877907119efd30c7698ecc7c5da84f877ba02d7
|
[
"MIT"
] | 26
|
2019-01-30T16:13:42.000Z
|
2019-05-28T19:58:41.000Z
|
schemas/data.py
|
CIMAC-CIDC/cidc-ingestion-api
|
1877907119efd30c7698ecc7c5da84f877ba02d7
|
[
"MIT"
] | null | null | null |
"""
Data schema, each record represents a file in a google bucket.
"""
from schemas.fastq_schema import FASTQ_SCHEMA
DATA = {
'public_methods': [],
'resource_methods': ['GET'],
'item_methods': ['GET'],
'allowed_roles': ['admin', 'user', 'uploader', 'system'],
'allowed_item_roles': ['admin', 'user', 'uploader', 'system'],
'datasource': {
'source': 'data',
'filter': {
'visibility': True
},
},
'schema': {
'data_format': {
"type": "string",
"required": True,
},
'file_name': {
'type': 'string',
'required': True,
},
'file_size': {
'type': 'integer',
'required': True
},
'sample_ids': {
'type': 'list',
'schema': {
'type': 'string',
'required': True
}
},
'number_of_samples': {
'type': 'integer',
'required': True
},
'trial': {
'type': 'objectid',
'required': True,
},
'trial_name': {
'type': 'string',
'required': True,
},
'gs_uri': {
'type': 'string',
'required': True,
},
'assay': {
'type': 'objectid',
'required': True,
},
'experimental_strategy': {
'type': 'string',
'required': True,
},
'date_created': {
'type': 'string',
'required': True,
},
'analysis_id': {
'type': 'objectid',
},
'mapping': {
'type': 'string',
'required': True,
},
'processed': {
'type': 'boolean'
},
'visibility': {
'type': 'boolean'
},
'uuid_alias': {
'type': 'string',
'required': True,
},
'children': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'_id': {
'type': 'objectid',
'required': True
},
'resource': {
'type': 'string',
'required': True
}
}
}
},
'fastq_properties': {
'type': 'dict',
'nullable': True,
'schema': FASTQ_SCHEMA
},
'download_link': {
'type': 'string',
'nullable': True
}
}
}
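# Illustrative shape of a record that satisfies the DATA schema above (all
# values are made up and not part of the original module; the required
# ObjectId fields 'trial' and 'assay' are omitted here for brevity).
_DATA_RECORD_EXAMPLE = {
    'data_format': 'FASTQ',
    'file_name': 'sample_1.fastq.gz',
    'file_size': 1048576,
    'sample_ids': ['CIMAC-SAMPLE-1'],
    'number_of_samples': 1,
    'trial_name': 'example-trial',
    'gs_uri': 'gs://example-bucket/sample_1.fastq.gz',
    'experimental_strategy': 'WES',
    'date_created': '2019-01-01',
    'mapping': 'fastq',
    'uuid_alias': 'demo-alias',
    'visibility': True,
}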
DATA_EDIT = {
"public_methods": [],
"allowed_roles": ["admin", "system"],
"allowed_item_roles": ["admin", "system"],
"resource_methods": ["POST"],
"item_methods": ["PATCH", "DELETE"],
"datasource": {
'source': 'data',
},
"schema": DATA["schema"]
}
DATA_TOGGLE_VIS = {
"public_methods": [],
"allowed_roles": ["admin", "user", "uploader", "system"],
"allowed_item_roles": ["admin", "user", "uploader", "system"],
"resource_methods": ["GET"],
"item_methods": ["PATCH"],
"datasource": {
"source": "data",
"projection": {
"visibility": 1
}
},
"schema": {
"visibility": {
"type": "boolean"
}
}
}
DATA_AGG_INPUTS = {
'allowed_roles': ["admin", "system"],
'allowed_item_roles': ["admin", "system"],
'datasource': {
'source': 'data',
'aggregation': {
'pipeline': [
{
"$match": {
"mapping": {
"$in": "$inputs"
},
"processed": False,
"visibility": True
}
},
{
"$group": {
"_id": {
"sample_ids": "$sample_ids",
"assay": "$assay",
"trial": "$trial",
"experimental_strategy": "$experimental_strategy",
"trial_name": "$trial_name"
},
"records": {
"$push": {
"file_name": "$file_name",
"gs_uri": "$gs_uri",
"mapping": "$mapping",
"data_format": "$data_format",
'_id': '$_id'
}
}
}
}
]
}
}
}
| 26.120219
| 78
| 0.347908
|
4aa1820359d1429c99b74230f41f07ac6c48b4ec
| 5,448
|
py
|
Python
|
tests/unit/test_parse.py
|
benknoble/Coqtail
|
c4d5c58771dd854671bd26b94693ecc1ffa21e39
|
[
"MIT"
] | null | null | null |
tests/unit/test_parse.py
|
benknoble/Coqtail
|
c4d5c58771dd854671bd26b94693ecc1ffa21e39
|
[
"MIT"
] | null | null | null |
tests/unit/test_parse.py
|
benknoble/Coqtail
|
c4d5c58771dd854671bd26b94693ecc1ffa21e39
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
# Author: Wolf Honore
"""Sentence parsing unit tests."""
from __future__ import absolute_import, division, print_function
import pytest
from coqtail import NoDotError, UnmatchedError, _get_message_range, _strip_comments
# Test Values #
tests = (
# Valid tests, no offset
("word", ["A."], (0, 1)),
("word2", ["A B."], (0, 3)),
("lwhite", [" A."], (0, 2)),
("rwhite", ["A. "], (0, 1)),
("comment pre", ["(* c. *) A."], (0, 10)),
("comment mid", ["A (* c. *) B."], (0, 12)),
("comment post", ["A (* c. *)."], (0, 10)),
("comment nest", ["A (* (* c. *) *)."], (0, 16)),
("str", ['A "B.".'], (0, 6)),
("str nest", ['A """B.""".'], (0, 10)),
("qualified", ["A.B."], (0, 3)),
("multi line", ["A", "B."], (1, 1)),
("multi line comment", ["A (*", ". *) B."], (1, 6)),
("multi line string", ['A "', '." B.'], (1, 4)),
("multi line comment nest", ["A (* (*", "c. ", "*) *) ."], (2, 6)),
("extra words", ["A. B."], (0, 1)),
("bullet -", ["- A."], (0, 0)),
("bullet +", ["+ A."], (0, 0)),
("bullet *", ["* A."], (0, 0)),
("bullet --", ["-- A."], (0, 1)),
("bullet ++", ["++ A."], (0, 1)),
("bullet **", ["** A."], (0, 1)),
("bullet {", ["{ A. }"], (0, 0)),
("bullet {{", ["{{ A. }}"], (0, 0)),
("bullet {{ 2", ["{{ A. }}"], (0, 1), (0, 1)),
("dot3", ["A..."], (0, 3)),
("large space", ("A" + ("\n" * 5000) + ".").split("\n"), (5000, 0)),
("large comment", ("(*" + ("\n" * 5000) + "*) A.").split("\n"), (5000, 4)),
("attribute word", ["#[A] B."], (0, 6)),
("attribute bullet {", ["#[A] { A. }"], (0, 5)),
("attribute string", ['#[A="B]."] C.'], (0, 12)),
# Accept (tactic in *)
("star paren ok", ["A *) ."], (0, 5)),
# or a bullet followed by a tactic notation that starts with ')'
("star paren ok post comment", ["(* A *) *) ."], (0, 8)),
# Valid tests, offset
("bullet }", ["{ A. }"], (0, 4), (0, 5)),
("bullet dot } 1", ["{ A. }."], (0, 4), (0, 5)),
("bullet dot } 2", ["{ A. }."], (0, 6), (0, 6)),
# Valid tests for non-bracketed goal selectors
("select no spacing", ["1:t."], (0, 3)),
("select space after colon", ["1: t."], (0, 4)),
("select space before space after", ["1 : t."], (0, 5)),
("select space before colon", ["1 :t."], (0, 4)),
# Valid tests with bracketed goal selectors
("focus no spacing", ["1:{"], (0, 2)),
("focus trailing spacing", ["1:{ "], (0, 2)),
("focus space after colon", ["1: {"], (0, 3)),
("focus space before space after", ["1 : {"], (0, 4)),
("focus double space before double space after", ["1 : {"], (0, 6)),
("focus space before colon", ["1 :{"], (0, 3)),
("focus trailing command no spaces", ["2:{t."], (0, 2)),
("focus trailing command with spaces", ["2 : { t."], (0, 4)),
# Invalid tests
("no dot", ["A"], (NoDotError, None)),
("dot2", ["A.."], (NoDotError, None)),
("unclosed comment pre", ["(* ."], (UnmatchedError, (0, 0))),
("unclosed comment", ["A (* ."], (UnmatchedError, (0, 2))),
("unclosed comment nest pre", ["(* (* A *) ."], (UnmatchedError, (0, 0))),
("unclosed string", ['A " .'], (UnmatchedError, (0, 2))),
("unclosed attribute", ["#[A B."], (UnmatchedError, (0, 0))),
("unclosed string attribute", ['#[A="B] C.'], (UnmatchedError, (0, 0))),
("only white", [" "], (NoDotError, None)),
("empty", [""], (NoDotError, None)),
)
# Default 'start' to (0, 0)
tests = (
(
t[0],
list(map(lambda s: s.encode("utf-8"), t[1])),
t[2] if len(t) == 4 else (0, 0),
t[3] if len(t) == 4 else t[2],
)
for t in tests
)
# Test Cases #
@pytest.mark.parametrize("_name, lines, start, stop", tests)
def test_parse(_name, lines, start, stop):
"""'_get_message_range(lines)' should range from 'start' to 'stop'."""
if isinstance(stop[0], int):
assert _get_message_range(lines, start) == {"start": start, "stop": stop}
else:
ex, stop = stop
with pytest.raises(ex) as e:
_get_message_range(lines, start)
if stop is not None:
assert e.value.range[0] == stop
com_tests = (
("no comment", b"abc", (b"abc", [])),
("pre", b"(*abc*)def", (b" def", [[0, 7]])),
("mid", b"ab(* c *)de", (b"ab de", [[2, 7]])),
("post", b"abc(*def *)", (b"abc", [[3, 8]])),
(
"multi",
b"abc (* com1 *) def (*com2 *) g",
(b"abc def g", [[4, 10], [20, 9]]),
),
("nested", b"abc (* c1 (*c2 (*c3*) (*c4*) *) *)def", (b"abc def", [[4, 30]])),
("no comment newline", b"\nabc\n\n", (b"\nabc\n\n", [])),
("pre newline", b"(*ab\nc*)d\nef", (b" d\nef", [[0, 8]])),
("mid newline", b"ab(* c *)\nde", (b"ab \nde", [[2, 7]])),
("post newline", b"abc\n(*def *)\n", (b"abc\n \n", [[4, 8]])),
(
"multi newline",
b"abc (* com1 *)\n def \n(*\ncom2 *) g",
(b"abc \n def \n g", [[4, 10], [21, 10]]),
),
(
"nested newline",
b"\nabc (* c1 (*c2 \n\n(*c3\n*) (*c4*) *) *)def\n",
(b"\nabc def\n", [[5, 33]]),
),
("star paren", b"abc *)", (b"abc *)", [])),
("star paren post comment", b"(*abc*) *)", (b" *)", [[0, 7]])),
)
@pytest.mark.parametrize("_name, msg, expected", com_tests)
def test_strip_comment(_name, msg, expected):
"""_strip_comments() should remove only comments"""
assert _strip_comments(msg) == expected
| 38.914286
| 83
| 0.449706
|
d8d33fcdd9864a2e17f3d6cc2ac72cb64d4bcffb
| 17,664
|
py
|
Python
|
benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/11-sender_receiver_14.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/11-sender_receiver_14.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/11-sender_receiver_14.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| 38.907489
| 89
| 0.573143
|
272d33d736943b6520b0411896ffec62d09a0b7a
| 45,757
|
py
|
Python
|
eventsourcing/domain.py
|
johnbywater/eventsourcing
|
83ec6aea90c3ffaf5021119c741c6b97361c4ea1
|
[
"BSD-3-Clause"
] | 972
|
2015-09-16T02:03:44.000Z
|
2021-10-13T15:10:38.000Z
|
eventsourcing/domain.py
|
johnbywater/eventsourcing
|
83ec6aea90c3ffaf5021119c741c6b97361c4ea1
|
[
"BSD-3-Clause"
] | 207
|
2015-10-13T15:46:29.000Z
|
2021-10-08T07:23:40.000Z
|
eventsourcing/domain.py
|
johnbywater/eventsourcing
|
83ec6aea90c3ffaf5021119c741c6b97361c4ea1
|
[
"BSD-3-Clause"
] | 117
|
2015-10-13T13:24:56.000Z
|
2021-10-12T07:19:47.000Z
|
import inspect
import os
from abc import ABC, ABCMeta
from dataclasses import dataclass
from datetime import datetime, tzinfo
from types import FunctionType, WrapperDescriptorType
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from uuid import UUID, uuid4
from eventsourcing.utils import get_method_name, get_topic, resolve_topic
# noinspection SpellCheckingInspection
TZINFO: tzinfo = resolve_topic(os.getenv("TZINFO_TOPIC", "datetime:timezone.utc"))
class MetaDomainEvent(ABCMeta):
def __new__(
mcs, name: str, bases: Tuple[type, ...], cls_dict: Dict[str, Any]
) -> "MetaDomainEvent":
event_cls = super().__new__(mcs, name, bases, cls_dict)
event_cls = dataclass(frozen=True)(event_cls) # type: ignore
return event_cls
T = TypeVar("T")
@dataclass(frozen=True)
class DomainEvent(ABC, Generic[T]):
"""
Base class for domain events, such as aggregate :class:`AggregateEvent`
and aggregate :class:`Snapshot`.
"""
originator_id: UUID
originator_version: int
timestamp: datetime
def mutate(self, aggregate: Optional[T]) -> Optional[T]:
"""Abstract mutator method."""
@staticmethod
def create_timestamp() -> datetime:
return datetime.now(tz=TZINFO)
TDomainEvent = TypeVar("TDomainEvent", bound=DomainEvent[Any])
TAggregate = TypeVar("TAggregate", bound="Aggregate")
class AggregateEvent(DomainEvent[TAggregate], metaclass=MetaDomainEvent):
"""
Base class for aggregate events. Subclasses will model
decisions made by the domain model aggregates.
"""
def mutate(self, aggregate: Optional[TAggregate]) -> Optional[TAggregate]:
"""
Changes the state of the aggregate
according to domain event attributes.
"""
assert aggregate is not None
# Check this event belongs to this aggregate.
if self.originator_id != aggregate.id:
raise OriginatorIDError(self.originator_id, aggregate.id)
# Check this event is the next in its sequence.
next_version = aggregate.version + 1
if self.originator_version != next_version:
raise OriginatorVersionError(self.originator_version, next_version)
# Call apply() before mutating values, in case exception is raised.
self.apply(aggregate)
# Update the aggregate version.
aggregate.version = self.originator_version
# Update the modified time.
aggregate.modified_on = self.timestamp
# Return the mutated aggregate.
return aggregate
def apply(self, aggregate: TAggregate) -> None:
"""
Applies the domain event to the aggregate.
"""
class AggregateCreated(AggregateEvent[TAggregate]):
# noinspection PyUnresolvedReferences
"""
Domain event for when aggregate is created.
Constructor arguments:
:param UUID originator_id: ID of originating aggregate.
:param int originator_version: version of originating aggregate.
:param datetime timestamp: date-time of the event
:param str originator_topic: topic for the aggregate class
"""
originator_topic: str
def mutate(self, aggregate: Optional[TAggregate]) -> Optional[TAggregate]:
"""
Constructs aggregate instance defined
by domain event object attributes.
"""
assert aggregate is None
# Resolve originator topic.
aggregate_class: Type[TAggregate] = resolve_topic(
self.__dict__["originator_topic"]
)
# Construct and return aggregate object.
agg = aggregate_class.__new__(aggregate_class)
# Separate the base class keywords arguments.
base_kwargs = _select_kwargs_mentioned_in_sig(self.__dict__, agg.__base_init__)
# Call the base class init method.
agg.__base_init__(**base_kwargs)
# Select values that aren't mentioned in the method signature.
init_kwargs = _select_kwargs_mentioned_in_sig(
self.__dict__, agg.__init__ # type: ignore
)
# Provide the id, if the init method expects it.
if aggregate_class in _init_mentions_id:
init_kwargs["id"] = self.__dict__["originator_id"]
# Call the aggregate class init method.
agg.__init__(**init_kwargs) # type: ignore
self.apply(agg)
return agg
def _select_kwargs_mentioned_in_sig(
kwargs: Dict[str, Any], method: Callable[..., Any]
) -> Dict[str, Any]:
method_signature = inspect.signature(method)
names = set(method_signature.parameters)
return {k: v for k, v in kwargs.items() if k in names}
EventSpecType = Optional[Union[str, Type[AggregateEvent[Any]]]]
AnyCallable = Callable[..., None]
DecoratedObjType = TypeVar("DecoratedObjType", bound=Union[AnyCallable, property])
InjectEventType = bool
class CommandMethodDecorator:
def __init__(
self,
event_spec: EventSpecType,
decorated_obj: DecoratedObjType,
):
self.is_name_inferred_from_method = False
self.given_event_cls: Optional[Type[AggregateEvent[Any]]] = None
self.event_cls_name: Optional[str] = None
self.decorated_property: Optional[property] = None
self.is_property_setter = False
self.property_setter_arg_name: Optional[str] = None
self.decorated_method: AnyCallable
# Event name has been specified.
if isinstance(event_spec, str):
if event_spec == "":
raise ValueError("Can't use empty string as name of event class")
self.event_cls_name = event_spec
# Event class has been specified.
elif isinstance(event_spec, type) and issubclass(event_spec, AggregateEvent):
if event_spec in given_event_classes:
name = event_spec.__name__
raise TypeError(f"{name} event class used in more than one decorator")
self.given_event_cls = event_spec
given_event_classes.add(event_spec)
# Process a decorated property.
if isinstance(decorated_obj, property):
# Disallow putting event decorator on property getter.
if decorated_obj.fset is None:
assert decorated_obj.fget, "Property has no getter"
method_name = decorated_obj.fget.__name__
raise TypeError(
f"@event can't decorate {method_name}() property getter"
)
# Remember we are decorating a property.
self.decorated_property = decorated_obj
# Remember the decorated method as the "setter" of the property.
self.decorated_method = decorated_obj.fset
assert isinstance(self.decorated_method, FunctionType)
# Disallow deriving event class names from property names.
if not self.given_event_cls and not self.event_cls_name:
method_name = self.decorated_method.__name__
raise TypeError(
f"@event on {method_name}() setter requires event name or class"
)
# Remember the name of the second setter arg.
setter_arg_names = list(inspect.signature(self.decorated_method).parameters)
assert len(setter_arg_names) == 2
self.property_setter_arg_name = setter_arg_names[1]
# Process a decorated method.
elif isinstance(decorated_obj, FunctionType):
# Remember the decorated method as the decorated object.
self.decorated_method = decorated_obj
# If necessary, derive an event class name from the method.
if not self.given_event_cls and not self.event_cls_name:
original_method_name = self.decorated_method.__name__
if original_method_name != "__init__":
self.is_name_inferred_from_method = True
self.event_cls_name = "".join(
[s.capitalize() for s in original_method_name.split("_")]
)
# Disallow decorating other types of object.
else:
raise TypeError(f"{decorated_obj} is not a function or property")
# Disallow using methods with variable params to define event class.
if self.event_cls_name:
_check_no_variable_params(self.decorated_method)
def __call__(self, *args: Any, **kwargs: Any) -> None:
# Initialised decorator was called directly, presumably by
# a decorating property that has this decorator as its fset.
# So trigger an event.
assert self.is_property_setter
assert self.property_setter_arg_name
assert len(args) == 2
assert len(kwargs) == 0
assert isinstance(args[0], Aggregate)
aggregate_instance = args[0]
bound = BoundCommandMethodDecorator(self, aggregate_instance)
property_setter_arg_value = args[1]
kwargs = {self.property_setter_arg_name: property_setter_arg_value}
bound.trigger(**kwargs)
@overload
def __get__(
self, instance: None, owner: "MetaAggregate"
) -> Union["UnboundCommandMethodDecorator", property]:
... # pragma: no cover
@overload
def __get__(
self, instance: "Aggregate", owner: "MetaAggregate"
) -> Union["BoundCommandMethodDecorator", Any]:
... # pragma: no cover
def __get__(
self, instance: Optional["Aggregate"], owner: "MetaAggregate"
) -> Union[
"BoundCommandMethodDecorator", "UnboundCommandMethodDecorator", property, Any
]:
# If we are decorating a property, then delegate to the property's __get__.
if self.decorated_property:
return self.decorated_property.__get__(instance, owner)
# Return a "bound" command method decorator if we have an instance.
elif instance:
return BoundCommandMethodDecorator(self, instance)
# Return an "unbound" command method decorator if we have no instance.
else:
return UnboundCommandMethodDecorator(self)
def __set__(self, instance: "Aggregate", value: Any) -> None:
# Set decorated property indirectly by triggering an event.
assert self.property_setter_arg_name
b = BoundCommandMethodDecorator(self, instance)
kwargs = {self.property_setter_arg_name: value}
b.trigger(**kwargs)
# Called because specifying decorator params.
@overload
def event(arg: EventSpecType = None) -> Callable[[DecoratedObjType], DecoratedObjType]:
... # pragma: no cover
# Called because Python is actually decorating something.
@overload
def event(arg: DecoratedObjType) -> DecoratedObjType:
... # pragma: no cover
def event(
arg: Union[EventSpecType, DecoratedObjType] = None,
) -> Union[Callable[[DecoratedObjType], DecoratedObjType], DecoratedObjType]:
"""
Can be used to decorate an aggregate method so that when the
method is called an event is triggered. The body of the method
will be used to apply the event to the aggregate, both when the
event is triggered and when the aggregate is reconstructed from
stored events.
.. code-block:: python
class MyAggregate(Aggregate):
@event("NameChanged")
def set_name(self, name: str):
self.name = name
...is equivalent to...
.. code-block:: python
class MyAggregate(Aggregate):
def set_name(self, name: str):
self.trigger_event(self.NameChanged, name=name)
class NameChanged(Aggregate.Event):
name: str
def apply(self, aggregate):
aggregate.name = self.name
In the example above, the event "NameChanged" is defined automatically
by inspecting the signature of the `set_name()` method. If it is
preferred to declare the event class explicitly, for example to define
upcasting of old events, the event class itself can be mentioned in the
event decorator rather than just providing the name of the event as a
string.
.. code-block:: python
class MyAggregate(Aggregate):
class NameChanged(Aggregate.Event):
name: str
@event(NameChanged)
def set_name(self, name: str):
                self.name = name
"""
if isinstance(arg, (FunctionType, property)):
command_method_decorator = CommandMethodDecorator(
event_spec=None,
decorated_obj=arg,
)
return cast(
Callable[[DecoratedObjType], DecoratedObjType], command_method_decorator
)
elif (
arg is None
or isinstance(arg, str)
or isinstance(arg, type)
and issubclass(arg, AggregateEvent)
):
event_spec = arg
def create_command_method_decorator(
decorated_obj: DecoratedObjType,
) -> DecoratedObjType:
command_method_decorator = CommandMethodDecorator(
event_spec=event_spec,
decorated_obj=decorated_obj,
)
return cast(DecoratedObjType, command_method_decorator)
return create_command_method_decorator
else:
raise TypeError(
f"{arg} is not a str, aggregate event class, function, or property"
)
triggers = event
class UnboundCommandMethodDecorator:
"""
Wraps an EventDecorator instance when attribute is accessed
on an aggregate class.
"""
def __init__(self, event_decorator: CommandMethodDecorator):
"""
:param CommandMethodDecorator event_decorator:
"""
self.event_decorator = event_decorator
assert event_decorator.decorated_method
self.__qualname__ = event_decorator.decorated_method.__qualname__
self.__name__ = event_decorator.decorated_method.__name__
class BoundCommandMethodDecorator:
"""
Wraps an EventDecorator instance when attribute is accessed
on an aggregate so that the aggregate methods can be accessed.
"""
def __init__(
self, event_decorator: CommandMethodDecorator, aggregate: "TAggregate"
):
"""
:param CommandMethodDecorator event_decorator:
:param Aggregate aggregate:
"""
assert event_decorator.decorated_method
self.event_decorator = event_decorator
self.__qualname__ = event_decorator.decorated_method.__qualname__
self.__name__ = event_decorator.decorated_method.__name__
self.aggregate = aggregate
def trigger(self, *args: Any, **kwargs: Any) -> None:
assert isinstance(self.event_decorator, CommandMethodDecorator) # for PyCharm
assert self.event_decorator.decorated_method
kwargs = _coerce_args_to_kwargs(
self.event_decorator.decorated_method, args, kwargs
)
event_cls = decorated_event_classes[self.event_decorator]
kwargs = _select_kwargs_mentioned_in_sig(kwargs, event_cls.__dict__["__init__"])
self.aggregate.trigger_event(event_cls, **kwargs)
def __call__(self, *args: Any, **kwargs: Any) -> None:
self.trigger(*args, **kwargs)
given_event_classes: Set[type] = set()
decorated_methods: Dict[type, AnyCallable] = {}
aggregate_has_many_created_event_classes: Dict[type, List[str]] = {}
class DecoratedEvent(AggregateEvent[Any]):
def apply(self, aggregate: "TAggregate") -> None:
"""
Applies event to aggregate by calling method decorated by @event.
"""
# Call super method, just in case any base classes need it.
super().apply(aggregate)
# Identify the method that was decorated.
decorated_method = decorated_methods[type(self)]
# Select event attributes mentioned in method signature.
kwargs = _select_kwargs_mentioned_in_sig(self.__dict__, decorated_method)
# Call the original method with event attribute values.
decorated_method(aggregate, **kwargs)
decorated_event_classes: Dict[CommandMethodDecorator, Type[DecoratedEvent]] = {}
def _check_no_variable_params(method: FunctionType) -> None:
for param in inspect.signature(method).parameters.values():
if param.kind is param.VAR_POSITIONAL:
raise TypeError(
f"*{param.name} not supported by decorator on {method.__name__}()"
)
# Todo: Support VAR_POSITIONAL?
# annotations["__star_args__"] = "typing.Any"
if param.kind is param.VAR_KEYWORD:
# Todo: Support VAR_KEYWORD?
# annotations["__star_kwargs__"] = "typing.Any"
raise TypeError(
f"**{param.name} not supported by decorator on {method.__name__}()"
)
def _coerce_args_to_kwargs(
method: AnyCallable,
args: Iterable[Any],
kwargs: Dict[str, Any],
expects_id: bool = False,
) -> Dict[str, Any]:
assert isinstance(method, (FunctionType, WrapperDescriptorType))
method_signature = inspect.signature(method)
copy_kwargs = dict(kwargs)
args = tuple(args)
positional_names = []
keyword_defaults = {}
required_positional = []
required_keyword_only = []
if expects_id:
positional_names.append("id")
required_positional.append("id")
for name, param in method_signature.parameters.items():
if name == "self":
continue
# elif param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD):
if param.kind is param.KEYWORD_ONLY:
required_keyword_only.append(name)
if param.kind is param.POSITIONAL_OR_KEYWORD:
positional_names.append(name)
if param.default == param.empty:
required_positional.append(name)
if param.default != param.empty:
keyword_defaults[name] = param.default
# if not required_keyword_only and not positional_names:
# if args or kwargs:
# raise TypeError(f"{method.__name__}() takes no args")
for name in kwargs:
if name not in required_keyword_only and name not in positional_names:
raise TypeError(
f"{get_method_name(method)}() got an unexpected "
f"keyword argument '{name}'"
)
counter = 0
len_args = len(args)
if len_args > len(positional_names):
msg = (
f"{get_method_name(method)}() takes {len(positional_names) + 1} "
f"positional argument{'' if len(positional_names) + 1 == 1 else 's'} "
f"but {len_args + 1} were given"
)
raise TypeError(msg)
required_positional_not_in_kwargs = [
n for n in required_positional if n not in kwargs
]
num_missing = len(required_positional_not_in_kwargs) - len_args
if num_missing > 0:
missing_names = [
f"'{name}'" for name in required_positional_not_in_kwargs[len_args:]
]
msg = (
f"{get_method_name(method)}() missing {num_missing} required positional "
f"argument{'' if num_missing == 1 else 's'}: "
)
raise_missing_names_type_error(missing_names, msg)
for name in positional_names:
if counter + 1 > len_args:
break
if name not in kwargs:
copy_kwargs[name] = args[counter]
counter += 1
else:
raise TypeError(
f"{get_method_name(method)}() got multiple values for argument '{name}'"
)
missing_keyword_only_arguments = []
for name in required_keyword_only:
if name not in kwargs:
missing_keyword_only_arguments.append(name)
if missing_keyword_only_arguments:
missing_names = [f"'{name}'" for name in missing_keyword_only_arguments]
msg = (
f"{get_method_name(method)}() missing {len(missing_names)} "
f"required keyword-only argument"
f"{'' if len(missing_names) == 1 else 's'}: "
)
raise_missing_names_type_error(missing_names, msg)
for name, value in keyword_defaults.items():
if name not in copy_kwargs:
copy_kwargs[name] = value
return copy_kwargs
def raise_missing_names_type_error(missing_names: List[str], msg: str) -> None:
msg += missing_names[0]
if len(missing_names) == 2:
msg += f" and {missing_names[1]}"
elif len(missing_names) > 2:
msg += ", " + ", ".join(missing_names[1:-1])
msg += f", and {missing_names[-1]}"
raise TypeError(msg)
TT = TypeVar("TT", bound="type")
_annotations_mention_id: Set["MetaAggregate"] = set()
_init_mentions_id: Set["MetaAggregate"] = set()
class MetaAggregate(ABCMeta):
INITIAL_VERSION = 1
class Event(AggregateEvent[TAggregate]):
pass
class Created(Event[TAggregate], AggregateCreated[TAggregate]):
pass
_created_event_class: Type[AggregateCreated[Any]]
def __new__(mcs: Type[TT], *args: Any, **kwargs: Any) -> TT:
try:
class_annotations = args[2]["__annotations__"]
except KeyError:
class_annotations = None
annotations_mention_id = False
else:
try:
class_annotations.pop("id")
except KeyError:
annotations_mention_id = False
else:
annotations_mention_id = True
cls = ABCMeta.__new__(mcs, *args)
if class_annotations:
cls = dataclass(eq=False, repr=False)(cls)
if annotations_mention_id:
_annotations_mention_id.add(cls)
return cls
def __init__(
cls,
*args: Any,
created_event_name: Optional[str] = None,
) -> None:
super().__init__(*args)
# Identify or define a base event class for this aggregate.
base_event_name = "Event"
try:
base_event_cls = cls.__dict__[base_event_name]
except KeyError:
base_event_cls = cls._define_event_class(
base_event_name, (cls.Event,), None
)
setattr(cls, base_event_name, base_event_cls)
# Make sure all events defined on aggregate subclass the base event class.
for name, value in tuple(cls.__dict__.items()):
if name == base_event_name:
# Don't subclass the base event class again.
continue
if name.lower() == name:
# Don't subclass lowercase named attributes that have classes.
continue
if isinstance(value, type) and issubclass(value, AggregateEvent):
if not issubclass(value, base_event_cls):
sub_class = cls._define_event_class(
name, (value, base_event_cls), None
)
setattr(cls, name, sub_class)
# Identify or define the aggregate's "created" event class.
created_event_class: Optional[Type[AggregateCreated[Any]]] = None
# Has the "created" event class been indicated with '_created_event_class'.
if "_created_event_class" in cls.__dict__:
created_event_class = cls.__dict__["_created_event_class"]
if isinstance(created_event_class, type) and issubclass(
created_event_class, AggregateCreated
):
# We just subclassed the event classes, so reassign this.
created_event_class = getattr(cls, created_event_class.__name__)
assert created_event_class
cls._created_event_class = created_event_class
else:
raise TypeError(
f"{created_event_class} not subclass of {AggregateCreated.__name__}"
)
# Disallow using both '_created_event_class' and 'created_event_name'.
if created_event_class and created_event_name:
raise TypeError(
"Can't use both '_created_event_class' and 'created_event_name'"
)
        # Is the init method decorated with a CommandMethodDecorator?
if isinstance(cls.__dict__.get("__init__"), CommandMethodDecorator):
init_decorator: CommandMethodDecorator = cls.__dict__["__init__"]
# Set the original method on the class (un-decorate __init__).
cls.__init__ = init_decorator.decorated_method # type: ignore
# Disallow using both 'created_event_name' and '_created_event_class'.
if created_event_name:
raise TypeError(
"Can't use both 'created_event_name' and decorator on __init__"
)
elif created_event_class:
raise TypeError(
"Can't use both '_created_event_class' and decorator on __init__"
)
# Does the decorator specify a "create" event class?
if init_decorator.given_event_cls:
created_event_class = getattr(
cls, init_decorator.given_event_cls.__name__
)
if isinstance(created_event_class, type) and issubclass(
created_event_class, AggregateCreated
):
assert created_event_class
cls._created_event_class = created_event_class
else:
raise TypeError(
f"{created_event_class} not subclass of "
f"{AggregateCreated.__name__}"
)
# Does the decorator specify a "create" event name?
elif init_decorator.event_cls_name:
created_event_name = init_decorator.event_cls_name
# Disallow using decorator on __init__ without event spec.
else:
raise TypeError(
"Decorator on __init__ has neither event name nor class"
)
# Todo: Write a test to cover this when "Created" class is explicitly defined.
# Check if init mentions ID.
for param_name in inspect.signature(cls.__init__).parameters: # type: ignore
if param_name == "id":
_init_mentions_id.add(cls)
break
# If no "created" event class has been specified, find or create one.
if created_event_class is None:
# Discover all the "created" event classes already defined.
created_event_classes: Dict[str, Type[AggregateCreated[Any]]] = {}
for name, value in tuple(cls.__dict__.items()):
if isinstance(value, type) and issubclass(value, AggregateCreated):
created_event_classes[name] = value
# Is a "created" event class already defined that matches the name?
if created_event_name in created_event_classes:
cls._created_event_class = created_event_classes[created_event_name]
# If there is only one class defined, and we have no name, use it.
elif len(created_event_classes) == 1 and not created_event_name:
cls._created_event_class = next(iter(created_event_classes.values()))
# If there are no "created" event classes already defined, or a name is
# specified that hasn't matched, then define a "created" event class.
elif len(created_event_classes) == 0 or created_event_name:
# If no "created" event name has been specified, use default name.
if not created_event_name:
# This is safe because len(created_event_classes) == 0.
created_event_name = "Created"
# Disallow init method from having variable params if
# we are using it to define a "created" event class.
try:
init_method = cls.__dict__["__init__"]
except KeyError:
init_method = None
else:
try:
_check_no_variable_params(init_method)
except TypeError:
raise
# Define a "created" event class for this aggregate.
if issubclass(cls.Created, base_event_cls):
# Don't subclass from base event class twice.
bases: Tuple[type, ...] = (cls.Created,)
else:
bases = (cls.Created, base_event_cls)
event_cls = cls._define_event_class(
created_event_name,
bases,
init_method,
)
# Set the event class as an attribute of the aggregate class.
setattr(cls, created_event_name, event_cls)
# Remember which is the "created" event class.
cls._created_event_class = cast(Type[AggregateCreated[Any]], event_cls)
# Prepare to disallow ambiguity of choice between created event classes.
else:
aggregate_has_many_created_event_classes[cls] = list(
created_event_classes
)
# Prepare the subsequent event classes.
for attribute in tuple(cls.__dict__.values()):
# Watch out for @property that sits over an @event.
if isinstance(attribute, property) and isinstance(
attribute.fset, CommandMethodDecorator
):
attribute = attribute.fset
if attribute.is_name_inferred_from_method:
# We don't want name inferred from property (not past participle).
method_name = attribute.decorated_method.__name__
raise TypeError(
f"@event under {method_name}() property setter requires event "
f"class name"
)
# Attribute is a property decorating an event decorator.
attribute.is_property_setter = True
method_signature = inspect.signature(attribute.decorated_method)
assert len(method_signature.parameters) == 2
attribute.property_setter_arg_name = list(method_signature.parameters)[
1
]
# Attribute is an event decorator, so define a "decorated" event.
if isinstance(attribute, CommandMethodDecorator):
if attribute.given_event_cls:
# Check this is not a "created" event class.
if issubclass(attribute.given_event_cls, AggregateCreated):
raise TypeError(
f"{attribute.given_event_cls} "
f"is subclass of AggregateCreated"
)
# Define event class as subclass of given class.
given_subclass = getattr(cls, attribute.given_event_cls.__name__)
event_cls = cls._define_event_class(
attribute.given_event_cls.__name__,
(DecoratedEvent, given_subclass),
None,
)
else:
assert attribute.event_cls_name
# Check event class isn't already defined.
if attribute.event_cls_name in cls.__dict__:
raise TypeError(
f"{attribute.event_cls_name} "
f"event already defined on {cls.__name__}"
)
# Define event class from signature of original method.
event_cls = cls._define_event_class(
attribute.event_cls_name,
(DecoratedEvent, base_event_cls),
attribute.decorated_method,
)
# Cache the decorated method for the event class to use.
decorated_methods[event_cls] = attribute.decorated_method
# Set the event class as an attribute of the aggregate class.
setattr(cls, event_cls.__name__, event_cls)
# Remember which event class to trigger.
decorated_event_classes[attribute] = cast(
Type[DecoratedEvent], event_cls
)
# Check any create_id method defined on this class is static or class method.
if "create_id" in cls.__dict__:
if not isinstance(cls.__dict__["create_id"], (staticmethod, classmethod)):
raise TypeError(
f"{cls.create_id} is not a static or class method: "
f"{type(cls.create_id)}"
)
# Get the parameters of the create_id method that will be used by this class.
cls._create_id_param_names: List[str] = []
for name, param in inspect.signature(cls.create_id).parameters.items():
if param.kind in [param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD]:
cls._create_id_param_names.append(name)
# Define event classes for all events on bases.
for aggregate_base_class in args[1]:
for name, value in aggregate_base_class.__dict__.items():
if (
isinstance(value, type)
and issubclass(value, AggregateEvent)
and name not in cls.__dict__
and name.lower() != name
):
sub_class = cls._define_event_class(
name, (base_event_cls, value), None
)
setattr(cls, name, sub_class)
def _define_event_class(
cls,
name: str,
bases: Tuple[type, ...],
apply_method: Optional[AnyCallable],
) -> type:
# Define annotations for the event class (specs the init method).
annotations = {}
if apply_method is not None:
method_signature = inspect.signature(apply_method)
supers = {
s for b in bases for s in b.__mro__ if hasattr(s, "__annotations__")
}
super_annotations = {a for s in supers for a in s.__annotations__}
for param_name, param in list(method_signature.parameters.items())[1:]:
# Don't define 'id' on a "created" class.
if param_name == "id" and apply_method.__name__ == "__init__":
continue
# Don't override super class annotations, unless no default on param.
if param_name not in super_annotations or param.default == param.empty:
annotations[param_name] = "typing.Any" # Todo: Improve this?
event_cls_qualname = ".".join([cls.__qualname__, name])
event_cls_dict = {
"__annotations__": annotations,
"__module__": cls.__module__,
"__qualname__": event_cls_qualname,
}
# Create the event class object.
return type(name, bases, event_cls_dict)
def __call__(cls, *args: Any, **kwargs: Any) -> Any:
try:
created_event_classes = aggregate_has_many_created_event_classes[cls]
raise TypeError(
"""Can't decide which of many "created" event classes to use: """
f"""'{"', '".join(created_event_classes)}'. Please use class """
"arg 'created_event_name' or @event decorator on __init__ method."
)
except KeyError:
pass
self_init: WrapperDescriptorType = cls.__init__ # type: ignore
kwargs = _coerce_args_to_kwargs(
self_init,
args,
kwargs,
expects_id=cls in _annotations_mention_id,
)
return cls._create(
event_class=cls._created_event_class,
**kwargs,
)
def _create(
cls,
event_class: Type[AggregateCreated[TAggregate]],
*,
id: Optional[UUID] = None,
**kwargs: Any,
) -> TAggregate:
"""
Factory method to construct a new
aggregate object instance.
"""
# Construct the domain event class,
        # with an ID and version, and a
        # topic for the aggregate class.
create_id_kwargs = {
k: v for k, v in kwargs.items() if k in cls._create_id_param_names
}
originator_id = id or cls.create_id(**create_id_kwargs)
# Impose the required common "created" event attribute values.
kwargs = kwargs.copy()
kwargs.update(
originator_topic=get_topic(cls),
originator_id=originator_id,
originator_version=cls.INITIAL_VERSION,
timestamp=event_class.create_timestamp(),
)
try:
# noinspection PyArgumentList
created_event = event_class(
**kwargs,
)
except TypeError as e:
msg = f"Unable to construct '{event_class.__name__}' event: {e}"
raise TypeError(msg)
# Construct the aggregate object.
agg = created_event.mutate(None)
assert agg is not None
# Append the domain event to pending list.
agg.pending_events.append(created_event)
# Return the aggregate.
return agg
# noinspection PyUnusedLocal
@staticmethod
def create_id(**kwargs: Any) -> UUID:
"""
Returns a new aggregate ID.
"""
return uuid4()
class Aggregate(ABC, metaclass=MetaAggregate):
"""
Base class for aggregate roots.
"""
def __base_init__(
self, originator_id: UUID, originator_version: int, timestamp: datetime
) -> None:
"""
Initialises an aggregate object with an :data:`id`, a :data:`version`
number, and a :data:`timestamp`.
"""
self._id = originator_id
self._version = originator_version
self._created_on = timestamp
self._modified_on = timestamp
self._pending_events: List[AggregateEvent[Any]] = []
@property
def id(self) -> UUID:
"""
The ID of the aggregate.
"""
return self._id
@property
def version(self) -> int:
"""
The version number of the aggregate.
"""
return self._version
@version.setter
def version(self, version: int) -> None:
# noinspection PyAttributeOutsideInit
self._version = version
@property
def created_on(self) -> datetime:
"""
The date and time when the aggregate was created.
"""
return self._created_on
@property
def modified_on(self) -> datetime:
"""
The date and time when the aggregate was last modified.
"""
return self._modified_on
@modified_on.setter
def modified_on(self, modified_on: datetime) -> None:
# noinspection PyAttributeOutsideInit
self._modified_on = modified_on
@property
def pending_events(self) -> List[AggregateEvent[Any]]:
"""
A list of pending events.
"""
return self._pending_events
class Event(AggregateEvent[TAggregate]):
pass
class Created(Event[TAggregate], AggregateCreated[TAggregate]):
pass
def __eq__(self, other: Any) -> bool:
return type(self) == type(other) and self.__dict__ == other.__dict__
def __repr__(self) -> str:
attrs = [
f"{k.lstrip('_')}={v!r}"
for k, v in self.__dict__.items()
if k != "_pending_events"
]
return f"{type(self).__name__}({', '.join(attrs)})"
def trigger_event(
self,
event_class: Type[AggregateEvent[Any]],
**kwargs: Any,
) -> None:
"""
Triggers domain event of given type, by creating
an event object and using it to mutate the aggregate.
"""
# Construct the domain event as the
# next in the aggregate's sequence.
# Use counting to generate the sequence.
next_version = self.version + 1
# Impose the required common domain event attribute values.
kwargs = kwargs.copy()
kwargs.update(
originator_id=self.id,
originator_version=next_version,
timestamp=event_class.create_timestamp(),
)
try:
new_event = event_class(**kwargs)
except TypeError as e:
raise TypeError(f"Can't construct event {event_class}: {e}")
# Mutate aggregate with domain event.
new_event.mutate(self)
# Append the domain event to pending list.
self.pending_events.append(new_event)
def collect_events(self) -> List[AggregateEvent[Any]]:
"""
Collects and returns a list of pending aggregate
:class:`AggregateEvent` objects.
"""
collected = []
while self.pending_events:
collected.append(self.pending_events.pop(0))
return collected
# @overload
# def aggregate(*, created_event_name: str) -> Callable[[Any], Type[Aggregate]]:
# ...
#
#
# @overload
# def aggregate(cls: Any) -> Type[Aggregate]:
# ...
def aggregate(
cls: Optional[Any] = None,
*,
created_event_name: Optional[str] = None,
) -> Union[Type[Aggregate], Callable[[Any], Type[Aggregate]]]:
"""
Converts the class that was passed in to inherit from Aggregate.
.. code-block:: python
@aggregate
class MyAggregate:
pass
...is equivalent to...
.. code-block:: python
class MyAggregate(Aggregate):
pass
"""
def decorator(cls_: Any) -> Type[Aggregate]:
if issubclass(cls_, Aggregate):
raise TypeError(f"{cls_.__qualname__} is already an Aggregate")
bases = cls_.__bases__
if bases == (object,):
bases = (Aggregate,)
else:
bases += (Aggregate,)
cls_dict = dict()
cls_dict.update(cls_.__dict__)
cls_ = MetaAggregate(
cls_.__qualname__,
bases,
cls_dict,
created_event_name=created_event_name,
)
assert issubclass(cls_, Aggregate)
return cls_
if cls:
return decorator(cls)
else:
return decorator
class OriginatorIDError(Exception):
"""
Raised when a domain event can't be applied to
an aggregate due to an ID mismatch indicating
the domain event is not in the aggregate's
sequence of events.
"""
class OriginatorVersionError(Exception):
"""
Raised when a domain event can't be applied to
an aggregate due to version mismatch indicating
the domain event is not the next in the aggregate's
sequence of events.
"""
class VersionError(OriginatorVersionError):
"""
Old name for 'OriginatorVersionError'.
This class exists to maintain backwards-compatibility
    but will be removed in a future version. Please use
'OriginatorVersionError' instead.
"""
class Snapshot(DomainEvent[TAggregate], metaclass=MetaDomainEvent):
# noinspection PyUnresolvedReferences
"""
Snapshots represent the state of an aggregate at a particular
version.
Constructor arguments:
:param UUID originator_id: ID of originating aggregate.
:param int originator_version: version of originating aggregate.
:param datetime timestamp: date-time of the event
:param str topic: string that includes a class and its module
    :param dict state: state of originating aggregate.
"""
topic: str
state: Dict[str, Any]
@classmethod
def take(cls, aggregate: TAggregate) -> "Snapshot[TAggregate]":
"""
Creates a snapshot of the given :class:`Aggregate` object.
"""
aggregate_state = dict(aggregate.__dict__)
aggregate_state.pop("_pending_events")
class_version = getattr(type(aggregate), "class_version", 1)
if class_version > 1:
aggregate_state["class_version"] = class_version
originator_id = aggregate_state.pop("_id")
originator_version = aggregate_state.pop("_version")
# noinspection PyArgumentList
return cls( # type: ignore
originator_id=originator_id,
originator_version=originator_version,
timestamp=cls.create_timestamp(),
topic=get_topic(type(aggregate)),
state=aggregate_state,
)
def mutate(self, _: Optional[TAggregate]) -> TAggregate:
"""
Reconstructs the snapshotted :class:`Aggregate` object.
"""
cls = resolve_topic(self.topic)
assert issubclass(cls, Aggregate)
aggregate_state = dict(self.state)
from_version = aggregate_state.pop("class_version", 1)
class_version = getattr(cls, "class_version", 1)
while from_version < class_version:
upcast_name = f"upcast_v{from_version}_v{from_version + 1}"
upcast = getattr(cls, upcast_name)
upcast(aggregate_state)
from_version += 1
aggregate_state["_id"] = self.originator_id
aggregate_state["_version"] = self.originator_version
aggregate_state["_pending_events"] = []
aggregate: TAggregate = object.__new__(cls)
aggregate.__dict__.update(aggregate_state)
return aggregate
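

# ---------------------------------------------------------------------------
# Illustrative sketch appended by the editor; it is not part of the original
# module. It assumes the `event` decorator defined earlier in this file is in
# scope and that the module's own imports resolve when run as a script. The
# Dog, Order, and "Fido" names below are hypothetical example names.
if __name__ == "__main__":

    class Dog(Aggregate):
        @event("Registered")
        def __init__(self, name: str) -> None:
            self.name = name
            self.tricks: List[str] = []

        @event("TrickAdded")
        def add_trick(self, trick: str) -> None:
            self.tricks.append(trick)

    dog = Dog("Fido")               # triggers the Dog.Registered event
    dog.add_trick("roll over")      # triggers the Dog.TrickAdded event
    pending = dog.collect_events()  # drains both pending events
    assert len(pending) == 2 and not dog.pending_events

    # A snapshot captures the aggregate's state and can rebuild it. If Dog
    # later declared `class_version = 2` and a static `upcast_v1_v2(state)`
    # method, mutate() would run that upcast when rebuilding old snapshots.
    rebuilt = Snapshot.take(dog).mutate(None)
    assert rebuilt.tricks == ["roll over"]

    # The @aggregate decorator converts a plain class into an Aggregate,
    # optionally naming its "created" event class.
    @aggregate(created_event_name="Opened")
    class Order:
        def __init__(self, item: str) -> None:
            self.item = item

    order = Order(item="book")
    assert type(order.collect_events()[0]).__name__ == "Opened"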
| 35.498061
| 88
| 0.61324
|
baa310ddd6f096314432716e92c19da3f423cffc
| 60,054
|
py
|
Python
|
pandas/tests/frame/methods/test_replace.py
|
Japanuspus/pandas
|
e38e987160c792f315685dc74fc1fc33d9389a71
|
[
"BSD-3-Clause"
] | 1
|
2020-11-15T11:21:04.000Z
|
2020-11-15T11:21:04.000Z
|
pandas/tests/frame/methods/test_replace.py
|
Japanuspus/pandas
|
e38e987160c792f315685dc74fc1fc33d9389a71
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/frame/methods/test_replace.py
|
Japanuspus/pandas
|
e38e987160c792f315685dc74fc1fc33d9389a71
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from io import StringIO
import re
from typing import Dict, List, Union
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
@pytest.fixture
def mix_ab() -> Dict[str, List[Union[int, str]]]:
return {"a": list(range(4)), "b": list("ab..")}
@pytest.fixture
def mix_abc() -> Dict[str, List[Union[float, str]]]:
return {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
class TestDataFrameReplace:
def test_replace_inplace(self, datetime_frame, float_string_frame):
datetime_frame["A"][:5] = np.nan
datetime_frame["A"][-5:] = np.nan
tsframe = datetime_frame.copy()
return_value = tsframe.replace(np.nan, 0, inplace=True)
assert return_value is None
tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))
# mixed type
mf = float_string_frame
mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan
result = float_string_frame.replace(np.nan, 0)
expected = float_string_frame.fillna(value=0)
tm.assert_frame_equal(result, expected)
tsframe = datetime_frame.copy()
return_value = tsframe.replace([np.nan], [0], inplace=True)
assert return_value is None
tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))
def test_regex_replace_scalar(self, mix_ab):
obj = {"a": list("ab.."), "b": list("efgh")}
dfobj = DataFrame(obj)
dfmix = DataFrame(mix_ab)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1")
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self, mix_ab):
obj = {"a": list("ab.."), "b": list("efgh")}
dfobj = DataFrame(obj)
dfmix = DataFrame(mix_ab)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True)
assert return_value is None
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True)
assert return_value is None
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
return_value = res.replace(
re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True
)
assert return_value is None
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
return_value = res.replace(
re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True
)
assert return_value is None
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
return_value = res.replace(
re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True
)
assert return_value is None
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
return_value = res.replace(
re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True
)
assert return_value is None
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfobj.copy()
return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True)
assert return_value is None
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True)
assert return_value is None
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
return_value = res.replace(
regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True
)
assert return_value is None
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
return_value = res.replace(
regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True
)
assert return_value is None
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
return_value = res.replace(
regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True
)
assert return_value is None
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
return_value = res.replace(
regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True
)
assert return_value is None
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r"\s*\.\s*", r"e|f|g"]
values = [np.nan, "crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap"] * 3 + ["h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"]
values = [r"\1\1", r"\1_crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e_crap", "f_crap", "g_crap", "h"],
"c": ["h", "e_crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r"\s*\.\s*", r"e|f|g"]
values = [np.nan, "crap"]
res = dfobj.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap"] * 3 + ["h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"]
values = [r"\1\1", r"\1_crap"]
res = dfobj.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e_crap", "f_crap", "g_crap", "h"],
"c": ["h", "e_crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.copy()
return_value = res.replace(value=values, regex=to_replace_res, inplace=True)
assert return_value is None
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self, mix_ab):
# mixed frame to make sure this doesn't break things
dfmix = DataFrame(mix_ab)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r"\s*\.\s*", r"a"]
values = [np.nan, "crap"]
mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": mix2["a"],
"b": ["crap", "b", np.nan, np.nan],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
values = [r"\1\1", r"\1_crap"]
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
values = [r"\1\1", r"crap", r"\1_crap"]
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
values = [r"\1\1", r"crap", r"\1_crap"]
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self, mix_ab):
dfmix = DataFrame(mix_ab)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r"\s*\.\s*", r"a"]
values = [np.nan, "crap"]
res = dfmix.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]})
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
values = [r"\1\1", r"\1_crap"]
res = dfmix.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
values = [r"\1\1", r"crap", r"\1_crap"]
res = dfmix.copy()
return_value = res.replace(to_replace_res, values, inplace=True, regex=True)
assert return_value is None
expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
values = [r"\1\1", r"crap", r"\1_crap"]
res = dfmix.copy()
return_value = res.replace(regex=to_replace_res, value=values, inplace=True)
assert return_value is None
expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
tm.assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self, mix_abc):
dfmix = DataFrame(mix_abc)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)
res2 = dfmix.copy()
return_value = res2.replace(
{"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True
)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)
res2 = dfmix.copy()
return_value = res2.replace(
{"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True
)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"})
res2 = dfmix.copy()
return_value = res2.replace(
regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True
)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame(
{"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
)
res = dfmix.replace("a", {"b": np.nan}, regex=True)
res2 = dfmix.copy()
return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True)
assert return_value is None
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
res = dfmix.replace("a", {"b": np.nan}, regex=True)
res2 = dfmix.copy()
return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self, mix_abc):
# nested dicts will not work until this is implemented for Series
dfmix = DataFrame(mix_abc)
res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
return_value = res2.replace(
{"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True
)
assert return_value is None
res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}})
return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
tm.assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_non_first_character(self):
# GH 25259
df = DataFrame({"first": ["abc", "bca", "cab"]})
expected = DataFrame({"first": [".bc", "bc.", "c.b"]})
result = df.replace({"a": "."}, regex=True)
tm.assert_frame_equal(result, expected)
def test_regex_replace_dict_nested_gh4115(self):
df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2})
expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2})
result = df.replace({"Type": {"Q": 0, "T": 1}})
tm.assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self, mix_abc):
df = DataFrame(mix_abc)
expec = DataFrame(
{
"a": mix_abc["a"],
"b": np.array([np.nan] * 4),
"c": [np.nan, np.nan, np.nan, "d"],
}
)
res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True)
res2 = df.copy()
res3 = df.copy()
return_value = res2.replace(
[r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True
)
assert return_value is None
return_value = res3.replace(
regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True
)
assert return_value is None
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self, mix_abc):
# what happens when you try to replace a numeric value with a regex?
df = DataFrame(mix_abc)
res = df.replace(r"\s*\.\s*", 0, regex=True)
res2 = df.copy()
return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True)
assert return_value is None
res3 = df.copy()
return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True)
assert return_value is None
expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]})
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self, mix_abc):
df = DataFrame(mix_abc)
res = df.replace([r"\s*\.\s*", "b"], 0, regex=True)
res2 = df.copy()
return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True)
assert return_value is None
res3 = df.copy()
return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self, mix_abc):
df = DataFrame(mix_abc)
s1 = Series({"b": r"\s*\.\s*"})
s2 = Series({"b": np.nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
return_value = res2.replace(s1, s2, inplace=True, regex=True)
assert return_value is None
res3 = df.copy()
return_value = res3.replace(regex=s1, value=s2, inplace=True)
assert return_value is None
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self, mix_abc):
df = DataFrame(mix_abc)
expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]})
res = df.replace(0, "a")
tm.assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
@pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"])
def test_replace_regex_metachar(self, metachar):
df = DataFrame({"a": [metachar, "else"]})
result = df.replace({"a": {metachar: "paren"}})
expected = DataFrame({"a": ["paren", "else"]})
tm.assert_frame_equal(result, expected)
def test_replace(self, datetime_frame):
datetime_frame["A"][:5] = np.nan
datetime_frame["A"][-5:] = np.nan
zero_filled = datetime_frame.replace(np.nan, -1e8)
tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))
tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame)
datetime_frame["A"][:5] = np.nan
datetime_frame["A"][-5:] = np.nan
datetime_frame["B"][:5] = -1e8
# empty
df = DataFrame(index=["a", "b"])
tm.assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = DataFrame(
[("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
)
df1 = df.replace("-", np.nan)
expected_df = DataFrame(
[(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
)
tm.assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r".", r"e"]
values = [np.nan, "crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r".", r"f"]
values = [r"..", r"crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e", "crap", "g", "h"],
"c": ["h", "e", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
def test_replace_with_empty_list(self):
# GH 21977
s = Series([["a", "b"], [], np.nan, [1]])
df = DataFrame({"col": s})
expected = df
result = df.replace([], np.nan)
tm.assert_frame_equal(result, expected)
# GH 19266
with pytest.raises(ValueError, match="cannot assign mismatch"):
df.replace({np.nan: []})
with pytest.raises(ValueError, match="cannot assign mismatch"):
df.replace({np.nan: ["dummy", "alt"]})
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
result = df.replace(0, {"zero": 0.5, "one": 1.0})
expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}})
tm.assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
tm.assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
s = Series({"zero": 0.0, "one": 2.0})
result = df.replace(s, {"zero": 0.5, "one": 1.0})
expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}})
tm.assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
tm.assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]])
m = {"foo": 1, "bar": 2, "bah": 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
tm.assert_series_equal(expec, res)
def test_replace_mixed(self, float_string_frame):
mf = float_string_frame
mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan
result = float_string_frame.replace(np.nan, -18)
expected = float_string_frame.fillna(value=-18)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame)
result = float_string_frame.replace(np.nan, -1e8)
expected = float_string_frame.fillna(value=-1e8)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame)
# int block upcasting
df = DataFrame(
{
"A": Series([1.0, 2.0], dtype="float64"),
"B": Series([0, 1], dtype="int64"),
}
)
expected = DataFrame(
{
"A": Series([1.0, 2.0], dtype="float64"),
"B": Series([0.5, 1], dtype="float64"),
}
)
result = df.replace(0, 0.5)
tm.assert_frame_equal(result, expected)
return_value = df.replace(0, 0.5, inplace=True)
assert return_value is None
tm.assert_frame_equal(df, expected)
# int block splitting
df = DataFrame(
{
"A": Series([1.0, 2.0], dtype="float64"),
"B": Series([0, 1], dtype="int64"),
"C": Series([1, 2], dtype="int64"),
}
)
expected = DataFrame(
{
"A": Series([1.0, 2.0], dtype="float64"),
"B": Series([0.5, 1], dtype="float64"),
"C": Series([1, 2], dtype="int64"),
}
)
result = df.replace(0, 0.5)
tm.assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame(
{
"A": Series([1.0, 2.0], dtype="float64"),
"B": Series([0, 1], dtype="int64"),
}
)
expected = DataFrame(
{
"A": Series([1, "foo"], dtype="object"),
"B": Series([0, 1], dtype="int64"),
}
)
result = df.replace(2, "foo")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{
"A": Series(["foo", "bar"], dtype="object"),
"B": Series([0, "foo"], dtype="object"),
}
)
result = df.replace([1, 2], ["foo", "bar"])
tm.assert_frame_equal(result, expected)
# test case from
df = DataFrame(
{"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")}
)
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype("float64")
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
tm.assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
result = df.replace({"col": {1: "a", 4: "b"}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: "a", 4: "b"})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
result = df.replace({-1: "-", 1: "a", 4: "b"})
tm.assert_frame_equal(expected, result)
result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}})
tm.assert_frame_equal(expected, result)
def test_replace_value_is_none(self, datetime_frame):
orig_value = datetime_frame.iloc[0, 0]
orig2 = datetime_frame.iloc[1, 0]
datetime_frame.iloc[0, 0] = np.nan
datetime_frame.iloc[1, 0] = 1
result = datetime_frame.replace(to_replace={np.nan: 0})
expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T
tm.assert_frame_equal(result, expected)
result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8})
tsframe = datetime_frame.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
tm.assert_frame_equal(expected, result)
datetime_frame.iloc[0, 0] = orig_value
datetime_frame.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self, datetime_frame):
# dtypes
tsframe = datetime_frame.copy().astype(np.float32)
tsframe["A"][:5] = np.nan
tsframe["A"][-5:] = np.nan
zero_filled = tsframe.replace(np.nan, -1e8)
tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe)
tsframe["A"][:5] = np.nan
tsframe["A"][-5:] = np.nan
tsframe["B"][:5] = -1e8
b = tsframe["B"]
b[b == -1e8] = np.nan
tsframe["B"] = b
result = tsframe.fillna(method="bfill")
tm.assert_frame_equal(result, tsframe.fillna(method="bfill"))
@pytest.mark.parametrize(
"frame, to_replace, value, expected",
[
(DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})),
(
DataFrame({"ints": [1, 2, 3]}, dtype=np.int32),
1,
0,
DataFrame({"ints": [0, 2, 3]}, dtype=np.int32),
),
(
DataFrame({"ints": [1, 2, 3]}, dtype=np.int16),
1,
0,
DataFrame({"ints": [0, 2, 3]}, dtype=np.int16),
),
(
DataFrame({"bools": [True, False, True]}),
False,
True,
DataFrame({"bools": [True, True, True]}),
),
(
DataFrame({"complex": [1j, 2j, 3j]}),
1j,
0,
DataFrame({"complex": [0j, 2j, 3j]}),
),
(
DataFrame(
{
"datetime64": Index(
[
datetime(2018, 5, 28),
datetime(2018, 7, 28),
datetime(2018, 5, 28),
]
)
}
),
datetime(2018, 5, 28),
datetime(2018, 7, 28),
DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}),
),
# GH 20380
(
DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}),
"foo",
"bar",
DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}),
),
(
DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern"),
"B": [0, np.nan, 2],
}
),
Timestamp("20130102", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
),
),
# GH 35376
(
DataFrame([[1, 1.0], [2, 2.0]]),
1.0,
5,
DataFrame([[5, 5.0], [2, 2.0]]),
),
(
DataFrame([[1, 1.0], [2, 2.0]]),
1,
5,
DataFrame([[5, 5.0], [2, 2.0]]),
),
(
DataFrame([[1, 1.0], [2, 2.0]]),
1.0,
5.0,
DataFrame([[5, 5.0], [2, 2.0]]),
),
(
DataFrame([[1, 1.0], [2, 2.0]]),
1,
5.0,
DataFrame([[5, 5.0], [2, 2.0]]),
),
],
)
def test_replace_dtypes(self, frame, to_replace, value, expected):
result = getattr(frame, "replace")(to_replace, value)
tm.assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {"A": np.nan, "B": 0, "C": ""}
values = {"A": 0, "B": -1, "C": "missing"}
df = DataFrame(
{"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
filled = df.replace(to_rep, values)
expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame(
{"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]}
)
tm.assert_frame_equal(result, expected)
# scalar to dict
values = {"A": 0, "B": -1, "C": "missing"}
df = DataFrame(
{"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
filled = df.replace(np.nan, values)
expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, ""]
values = [-2, -1, "missing"]
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
return_value = expected.replace(to_rep[i], values[i], inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
df.replace(to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame(
{"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
# dict to scalar
to_rep = {"A": np.nan, "B": 0, "C": ""}
filled = df.replace(to_rep, 0)
expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
msg = "value argument must be scalar, dict, or Series"
with pytest.raises(TypeError, match=msg):
df.replace(to_rep, [np.nan, 0, ""])
# list to scalar
to_rep = [np.nan, 0, ""]
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
return_value = expected.replace(to_rep[i], -1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series(
{
0: "Strongly Agree",
1: "Agree",
2: "Neutral",
3: "Disagree",
4: "Strongly Disagree",
}
)
weights = {
"Agree": 4,
"Disagree": 2,
"Neutral": 3,
"Strongly Agree": 5,
"Strongly Disagree": 1,
}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series(
{
0: "Strongly Agree",
1: "Agree",
2: "Neutral",
3: "Disagree",
4: "Strongly Disagree",
}
)
weights = Series(
{
"Agree": 4,
"Disagree": 2,
"Neutral": 3,
"Strongly Agree": 5,
"Strongly Disagree": 1,
}
)
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[np.nan, 1]))
res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])
expected = DataFrame({"A": [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r"\s+")
res = df.replace({r"\D": 1})
tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({"a": [True, False], "b": list("ab")})
result = df.replace(True, "a")
expected = DataFrame({"a": ["a", False], "b": df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace("asdf", "fdsa")
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
result = df.replace({"asdf": "asdb", True: "yes"})
expected = DataFrame({0: ["yes", False], 1: [False, "yes"]})
tm.assert_frame_equal(result, expected)
def test_replace_dict_strings_vs_ints(self):
# GH#34789
df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]})
result = df.replace({"replace_string": "test"})
tm.assert_frame_equal(result, df)
result = df["Y0"].replace({"replace_string": "test"})
tm.assert_series_equal(result, df["Y0"])
def test_replace_truthy(self):
df = DataFrame({"a": [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_nested_dict_overlapping_keys_replace_int(self):
# GH 27660 keep behaviour consistent for simple dictionary and
# nested dictionary replacement
df = DataFrame({"a": list(range(1, 5))})
result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))})
expected = df.replace(dict(zip(range(1, 5), range(2, 6))))
tm.assert_frame_equal(result, expected)
def test_nested_dict_overlapping_keys_replace_str(self):
# GH 27660
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({"a": astr})
result = df.replace(dict(zip(astr, bstr)))
expected = df.replace({"a": dict(zip(astr, bstr))})
tm.assert_frame_equal(result, expected)
def test_replace_swapping_bug(self):
df = DataFrame({"a": [True, False, True]})
res = df.replace({"a": {True: "Y", False: "N"}})
expect = DataFrame({"a": ["Y", "N", "Y"]})
tm.assert_frame_equal(res, expect)
df = DataFrame({"a": [0, 1, 0]})
res = df.replace({"a": {0: "Y", 1: "N"}})
expect = DataFrame({"a": ["Y", "N", "Y"]})
tm.assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
"fname": {
"out_augmented_AUG_2011.json": pd.Period(year=2011, month=8, freq="M"),
"out_augmented_JAN_2011.json": pd.Period(year=2011, month=1, freq="M"),
"out_augmented_MAY_2012.json": pd.Period(year=2012, month=5, freq="M"),
"out_augmented_SUBSIDY_WEEK.json": pd.Period(
year=2011, month=4, freq="M"
),
"out_augmented_AUG_2012.json": pd.Period(year=2012, month=8, freq="M"),
"out_augmented_MAY_2011.json": pd.Period(year=2011, month=5, freq="M"),
"out_augmented_SEP_2013.json": pd.Period(year=2013, month=9, freq="M"),
}
}
df = DataFrame(
[
"out_augmented_AUG_2012.json",
"out_augmented_SEP_2013.json",
"out_augmented_SUBSIDY_WEEK.json",
"out_augmented_MAY_2012.json",
"out_augmented_MAY_2011.json",
"out_augmented_AUG_2011.json",
"out_augmented_JAN_2011.json",
],
columns=["fname"],
)
assert set(df.fname.values) == set(d["fname"].keys())
# We don't support converting object -> specialized EA in
# replace yet.
expected = DataFrame(
{"fname": [d["fname"][k] for k in df.fname.values]}, dtype=object
)
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {
"fname": {
"out_augmented_AUG_2011.json": Timestamp("2011-08"),
"out_augmented_JAN_2011.json": Timestamp("2011-01"),
"out_augmented_MAY_2012.json": Timestamp("2012-05"),
"out_augmented_SUBSIDY_WEEK.json": Timestamp("2011-04"),
"out_augmented_AUG_2012.json": Timestamp("2012-08"),
"out_augmented_MAY_2011.json": Timestamp("2011-05"),
"out_augmented_SEP_2013.json": Timestamp("2013-09"),
}
}
df = DataFrame(
[
"out_augmented_AUG_2012.json",
"out_augmented_SEP_2013.json",
"out_augmented_SUBSIDY_WEEK.json",
"out_augmented_MAY_2012.json",
"out_augmented_MAY_2011.json",
"out_augmented_AUG_2011.json",
"out_augmented_JAN_2011.json",
],
columns=["fname"],
)
assert set(df.fname.values) == set(d["fname"].keys())
expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern"),
"B": [0, np.nan, 2],
}
)
result = df.replace(np.nan, 1)
expected = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern"),
"B": Series([0, 1, 2], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
result = df.fillna(1)
tm.assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern"),
"B": [np.nan, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
result = df.replace(
Timestamp("20130102", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
)
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern"))
tm.assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific"))
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Pacific"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": np.nan}, Timestamp("20130104"))
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self, mix_abc):
# GH 15289
df = DataFrame(mix_abc)
tm.assert_frame_equal(df, df.replace({}))
tm.assert_frame_equal(df, df.replace(Series([], dtype=object)))
tm.assert_frame_equal(df, df.replace({"b": {}}))
tm.assert_frame_equal(df, df.replace(Series({"b": {}})))
@pytest.mark.parametrize(
"to_replace, method, expected",
[
(0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}),
(
np.nan,
"bfill",
{"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]},
),
("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}),
(
[0, 2],
"bfill",
{"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
),
(
[1, 2],
"pad",
{"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
),
(
(1, 2),
"bfill",
{"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
),
(
["b", "c"],
"ffill",
{"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]},
),
],
)
def test_replace_method(self, to_replace, method, expected):
# GH 19632
df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]})
result = df.replace(to_replace=to_replace, value=None, method=method)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"replace_dict, final_data",
[({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])],
)
def test_categorical_replace_with_dict(self, replace_dict, final_data):
# GH 26988
df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category")
final_data = np.array(final_data)
a = pd.Categorical(final_data[:, 0], categories=[3, 2])
excat = [3, 2] if replace_dict["b"] == 1 else [1, 3]
b = pd.Categorical(final_data[:, 1], categories=excat)
expected = DataFrame({"a": a, "b": b})
result = df.replace(replace_dict, 3)
tm.assert_frame_equal(result, expected)
msg = (
r"Attributes of DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are "
"different"
)
with pytest.raises(AssertionError, match=msg):
# ensure non-inplace call does not affect original
tm.assert_frame_equal(df, expected)
return_value = df.replace(replace_dict, 3, inplace=True)
assert return_value is None
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"df, to_replace, exp",
[
(
{"col1": [1, 2, 3], "col2": [4, 5, 6]},
{4: 5, 5: 6, 6: 7},
{"col1": [1, 2, 3], "col2": [5, 6, 7]},
),
(
{"col1": [1, 2, 3], "col2": ["4", "5", "6"]},
{"4": "5", "5": "6", "6": "7"},
{"col1": [1, 2, 3], "col2": ["5", "6", "7"]},
),
],
)
def test_replace_commutative(self, df, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
        # also tested on DataFrame although the original issue was for Series
df = DataFrame(df)
expected = DataFrame(exp)
result = df.replace(to_replace)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"replacer",
[
Timestamp("20170827"),
np.int8(1),
np.int16(1),
np.float32(1),
np.float64(1),
],
)
def test_replace_replacer_dtype(self, replacer):
# GH26632
df = DataFrame(["a"])
result = df.replace({"a": replacer, "b": replacer})
expected = DataFrame([replacer])
tm.assert_frame_equal(result, expected)
def test_replace_after_convert_dtypes(self):
# GH31517
df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
result = df.replace(1, 10)
expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]})
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
df.replace(lambda x: x.strip())
@pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"])
@pytest.mark.parametrize("value", [np.nan, pd.NA])
def test_replace_no_replacement_dtypes(self, dtype, value):
# https://github.com/pandas-dev/pandas/issues/32988
df = DataFrame(np.eye(2), dtype=dtype)
result = df.replace(to_replace=[None, -np.inf, np.inf], value=value)
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize("replacement", [np.nan, 5])
def test_replace_with_duplicate_columns(self, replacement):
# GH 24798
result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]})
result.columns = list("AAB")
expected = DataFrame(
{"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]}
)
expected.columns = list("AAB")
result["B"] = result["B"].replace(7, replacement)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
reason="replace() changes dtype from period to object, see GH34871", strict=True
)
def test_replace_period_ignore_float(self):
"""
Regression test for GH#34871: if df.replace(1.0, 0.0) is called on a df
        with a Period column, the old, faulty behavior is to raise TypeError.
"""
df = DataFrame({"Per": [pd.Period("2020-01")] * 3})
result = df.replace(1.0, 0.0)
expected = DataFrame({"Per": [pd.Period("2020-01")] * 3})
tm.assert_frame_equal(expected, result)
def test_replace_value_category_type(self):
"""
Test for #23305: to ensure category dtypes are maintained
after replace with direct values
"""
# create input data
input_dict = {
"col1": [1, 2, 3, 4],
"col2": ["a", "b", "c", "d"],
"col3": [1.5, 2.5, 3.5, 4.5],
"col4": ["cat1", "cat2", "cat3", "cat4"],
"col5": ["obj1", "obj2", "obj3", "obj4"],
}
# explicitly cast columns as category and order them
input_df = DataFrame(data=input_dict).astype(
{"col2": "category", "col4": "category"}
)
input_df["col2"] = input_df["col2"].cat.reorder_categories(
["a", "b", "c", "d"], ordered=True
)
input_df["col4"] = input_df["col4"].cat.reorder_categories(
["cat1", "cat2", "cat3", "cat4"], ordered=True
)
# create expected dataframe
expected_dict = {
"col1": [1, 2, 3, 4],
"col2": ["a", "b", "c", "z"],
"col3": [1.5, 2.5, 3.5, 4.5],
"col4": ["cat1", "catX", "cat3", "cat4"],
"col5": ["obj9", "obj2", "obj3", "obj4"],
}
# explicitly cast columns as category and order them
expected = DataFrame(data=expected_dict).astype(
{"col2": "category", "col4": "category"}
)
expected["col2"] = expected["col2"].cat.reorder_categories(
["a", "b", "c", "z"], ordered=True
)
expected["col4"] = expected["col4"].cat.reorder_categories(
["cat1", "catX", "cat3", "cat4"], ordered=True
)
# replace values in input dataframe
input_df = input_df.replace("d", "z")
input_df = input_df.replace("obj1", "obj9")
result = input_df.replace("cat2", "catX")
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
reason="category dtype gets changed to object type after replace, see #35268",
strict=True,
)
def test_replace_dict_category_type(self, input_category_df, expected_category_df):
"""
Test to ensure category dtypes are maintained
after replace with dict values
"""
# create input dataframe
input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]}
# explicitly cast columns as category
input_df = DataFrame(data=input_dict).astype(
{"col1": "category", "col2": "category", "col3": "category"}
)
# create expected dataframe
expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]}
# explicitly cast columns as category
expected = DataFrame(data=expected_dict).astype(
{"col1": "category", "col2": "category", "col3": "category"}
)
# replace values in input dataframe using a dict
result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"})
tm.assert_frame_equal(result, expected)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
df = DataFrame(["a", "b", "c"])
regex = re.compile("^a$")
result = df.replace({regex: "z"}, regex=True)
expected = DataFrame(["z", "b", "c"])
tm.assert_frame_equal(result, expected)
def test_replace_intervals(self):
# https://github.com/pandas-dev/pandas/issues/35931
df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]})
result = df.replace({"a": {pd.Interval(0, 1): "x"}})
expected = DataFrame({"a": ["x", "x"]})
tm.assert_frame_equal(result, expected)
def test_replace_unicode(self):
# GH: 16784
columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}}
df1 = DataFrame({"positive": np.ones(3)})
result = df1.replace(columns_values_map)
expected = DataFrame({"positive": np.ones(3)})
tm.assert_frame_equal(result, expected)
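

# ---------------------------------------------------------------------------
# Illustrative sketch appended by the editor; it is not part of the original
# test suite. It summarises, on a tiny frame, the replace() behaviours the
# tests above exercise: exact-value replace, regex replace, and the nested
# dict form that restricts a replacement to one column.
if __name__ == "__main__":
    demo = DataFrame({"a": list("ab.."), "b": list("efgh")})

    # Plain replace matches whole cell values exactly.
    assert demo.replace(".", np.nan)["a"].isna().sum() == 2

    # With regex=True the pattern is treated as a regular expression.
    assert demo.replace(r"\s*\.\s*", np.nan, regex=True)["a"].isna().sum() == 2

    # A nested dict limits the replacement to the named column.
    assert demo.replace({"a": {".": "X"}})["a"].tolist() == ["a", "b", "X", "X"]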
| 36.730275
| 88
| 0.498968
|
c6200be3c56d1f1791deae6ff1ffdb2adc482e72
| 3,457
|
py
|
Python
|
src/python/pants/backend/python/pipenv_requirements.py
|
gshuflin/pants
|
cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/pipenv_requirements.py
|
gshuflin/pants
|
cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/pipenv_requirements.py
|
gshuflin/pants
|
cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from json import load
from pathlib import Path
from typing import Iterable, Mapping, Optional
from pkg_resources import Requirement
from pants.base.build_environment import get_buildroot
class PipenvRequirements:
"""Translates a Pipenv.lock file into an equivalent set `python_requirement_library` targets.
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
pipenv_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
requirements_relpath: str = "Pipfile.lock",
module_mapping: Optional[Mapping[str, Iterable[str]]] = None,
pipfile_target: Optional[str] = None,
) -> None:
"""
:param requirements_relpath: The relpath from this BUILD file to the requirements file.
Defaults to a `Pipfile.lock` file sibling to the BUILD file.
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
:param pipfile_target: a `_python_requirements_file` target to provide for cache invalidation
if the requirements_relpath value is not in the current rel_path
"""
lock_info = {}
requirements_path = Path(
get_buildroot(), self._parse_context.rel_path, requirements_relpath
)
with open(requirements_path, "r") as fp:
lock_info = load(fp)
if pipfile_target:
requirements_dep = pipfile_target
else:
requirements_file_target_name = requirements_relpath
self._parse_context.create_object(
"_python_requirements_file",
name=requirements_file_target_name,
sources=[requirements_relpath],
)
requirements_dep = f":{requirements_file_target_name}"
requirements = {**lock_info.get("default", {}), **lock_info.get("develop", {})}
for req, info in requirements.items():
req_str = f"{req}{info.get('version','')}"
if info.get("markers"):
req_str += f";{info['markers']}"
parsed_req = Requirement.parse(req_str)
req_module_mapping = (
{parsed_req.project_name: module_mapping[parsed_req.project_name]}
if module_mapping and parsed_req.project_name in module_mapping
else None
)
self._parse_context.create_object(
"python_requirement_library",
name=parsed_req.project_name,
requirements=[parsed_req],
dependencies=[requirements_dep],
module_mapping=req_module_mapping,
)
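# Hypothetical BUILD-file usage of the macro defined above; the module mapping is the one
# from the class docstring and "Pipfile.lock" is the default lockfile name, but the
# surrounding target layout is made up for illustration.
pipenv_requirements(
    requirements_relpath="Pipfile.lock",
    module_mapping={"ansicolors": ["colors"]},
)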
| 38.842697
| 101
| 0.641307
|
c4974b063857b68e63d4627c6f8a99f6fcbd033e
| 6,558
|
py
|
Python
|
SciDataTool/Classes/Norm_ref.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Classes/Norm_ref.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Classes/Norm_ref.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Norm_ref.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/SciDataTool/tree/master/SciDataTool/Methods//Norm_ref
"""
from os import linesep
from sys import getsizeof
from ._check import check_var, raise_
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .Normalization import Normalization
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Norm_ref.normalize import normalize
except ImportError as error:
normalize = error
from numpy import isnan
from ._check import InitUnKnowClassError
class Norm_ref(Normalization):
"""Normalization with a reference value (values/ref)"""
VERSION = 1
# cf Methods.Norm_ref.normalize
if isinstance(normalize, ImportError):
normalize = property(
fget=lambda x: raise_(
ImportError("Can't use Norm_ref method normalize: " + str(normalize))
)
)
else:
normalize = normalize
# save and copy methods are available in all object
save = save
copy = copy
def __init__(self, ref=1, unit="SI", init_dict=None, init_str=None):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for SciDataTool type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for SciDataTool Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "ref" in list(init_dict.keys()):
ref = init_dict["ref"]
if "unit" in list(init_dict.keys()):
unit = init_dict["unit"]
# Set the properties (value check and conversion are done in setter)
self.ref = ref
# Call Normalization init
super(Norm_ref, self).__init__(unit=unit)
# The class is frozen (in Normalization init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
Norm_ref_str = ""
# Get the properties inherited from Normalization
Norm_ref_str += super(Norm_ref, self).__str__()
Norm_ref_str += "ref = " + str(self.ref) + linesep
return Norm_ref_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from Normalization
if not super(Norm_ref, self).__eq__(other):
return False
if other.ref != self.ref:
return False
return True
def compare(self, other, name="self", ignore_list=None, is_add_value=False):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
# Check the properties inherited from Normalization
diff_list.extend(
super(Norm_ref, self).compare(
other, name=name, ignore_list=ignore_list, is_add_value=is_add_value
)
)
if (
other._ref is not None
and self._ref is not None
and isnan(other._ref)
and isnan(self._ref)
):
pass
elif other._ref != self._ref:
if is_add_value:
val_str = (
" (self=" + str(self._ref) + ", other=" + str(other._ref) + ")"
)
diff_list.append(name + ".ref" + val_str)
else:
diff_list.append(name + ".ref")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
# Get size of the properties inherited from Normalization
S += super(Norm_ref, self).__sizeof__()
S += getsizeof(self.ref)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
Convert this object in a json serializable dict (can be use in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
# Get the properties inherited from Normalization
Norm_ref_dict = super(Norm_ref, self).as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
Norm_ref_dict["ref"] = self.ref
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
Norm_ref_dict["__class__"] = "Norm_ref"
return Norm_ref_dict
def _set_None(self):
"""Set all the properties to None (except SciDataTool object)"""
self.ref = None
# Set to None the properties inherited from Normalization
super(Norm_ref, self)._set_None()
def _get_ref(self):
"""getter of ref"""
return self._ref
def _set_ref(self, value):
"""setter of ref"""
check_var("ref", value, "float")
self._ref = value
ref = property(
fget=_get_ref,
fset=_set_ref,
doc=u"""reference value
:Type: float
""",
)
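# Minimal usage sketch, assuming SciDataTool is installed and that normalize() divides the
# input by `ref`, as the class docstring "values/ref" suggests (the method body lives in
# SciDataTool/Methods/Norm_ref/normalize.py and is not shown here).
import numpy as np
from SciDataTool.Classes.Norm_ref import Norm_ref

norm = Norm_ref(ref=2.0, unit="SI")
print(norm.normalize(np.array([2.0, 4.0])))  # expected: [1. 2.]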
| 35.258065
| 107
| 0.597743
|
2b986fee48ef724eb923baaaa774d85889478d02
| 153
|
py
|
Python
|
CHCHCL.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | 2
|
2021-07-26T03:32:24.000Z
|
2021-07-31T02:32:14.000Z
|
CHCHCL.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | null | null | null |
CHCHCL.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | 1
|
2021-07-14T17:45:33.000Z
|
2021-07-14T17:45:33.000Z
|
# https://www.codechef.com/problems/CHCHCL
for T in range(int(input())):
n,m=map(int,input().split())
print("Yes") if(n*m%2==0) else print("No")
| 30.6
| 46
| 0.620915
|
a36eb31619b5aef927e32c1f88596ef850c938d5
| 13,513
|
py
|
Python
|
sphinx/ext/autosummary/generate.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/autosummary/generate.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/autosummary/generate.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usable as a library or script to generate automatic RST source files for
items referred to in autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
sphinx-autogen -o source/generated source/*.rst
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import re
import sys
import pydoc
import optparse
import codecs
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from sphinx.ext.autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
from sphinx.util.inspect import safe_getattr
# Add documenters to AutoDirective registry
from sphinx.ext.autodoc import add_documenter, \
ModuleDocumenter, ClassDocumenter, ExceptionDocumenter, DataDocumenter, \
FunctionDocumenter, MethodDocumenter, AttributeDocumenter, \
InstanceAttributeDocumenter
add_documenter(ModuleDocumenter)
add_documenter(ClassDocumenter)
add_documenter(ExceptionDocumenter)
add_documenter(DataDocumenter)
add_documenter(FunctionDocumenter)
add_documenter(MethodDocumenter)
add_documenter(AttributeDocumenter)
add_documenter(InstanceAttributeDocumenter)
if False:
# For type annotation
from typing import Any, Callable, Dict, Tuple, List # NOQA
from jinja2 import BaseLoader # NOQA
from sphinx import addnodes # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
def main(argv=sys.argv):
# type: (List[str]) -> None
usage = """%prog [OPTIONS] SOURCEFILE ..."""
p = optparse.OptionParser(usage.strip())
p.add_option("-o", "--output-dir", action="store", type="string",
dest="output_dir", default=None,
help="Directory to place all output in")
p.add_option("-s", "--suffix", action="store", type="string",
dest="suffix", default="rst",
help="Default suffix for files (default: %default)")
p.add_option("-t", "--templates", action="store", type="string",
dest="templates", default=None,
help="Custom template directory (default: %default)")
p.add_option("-i", "--imported-members", action="store_true",
dest="imported_members", default=False,
help="Document imported members (default: %default)")
options, args = p.parse_args(argv[1:])
if len(args) < 1:
p.error('no input files given')
generate_autosummary_docs(args, options.output_dir,
"." + options.suffix,
template_dir=options.templates,
imported_members=options.imported_members)
def _simple_info(msg):
# type: (unicode) -> None
print(msg)
def _simple_warn(msg):
# type: (unicode) -> None
print('WARNING: ' + msg, file=sys.stderr)
# -- Generating output ---------------------------------------------------------
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None,
imported_members=False):
# type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool) -> None # NOQA
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
info('[autosummary] generating autosummary for: %s' %
', '.join(showed_sources))
if output_dir:
info('[autosummary] writing to %s' % output_dir)
if base_path is not None:
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
template_dirs = None # type: List[unicode]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
template_loader = None # type: BaseLoader
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
template_loader.init(builder, dirs=template_dirs)
else:
if template_dir:
template_dirs.insert(0, template_dir)
template_loader = FileSystemLoader(template_dirs) # type: ignore
template_env = SandboxedEnvironment(loader=template_loader)
# read
items = find_autosummary_in_files(sources)
# keep track of new files
new_files = []
# write
for name, path, template_name in sorted(set(items), key=str):
if path is None:
# The corresponding autosummary:: directive did not have
# a :toctree: option
continue
path = output_dir or os.path.abspath(path)
ensuredir(path)
try:
name, obj, parent, mod_name = import_by_name(name)
except ImportError as e:
warn('[autosummary] failed to import %r: %s' % (name, e))
continue
fn = os.path.join(path, name + suffix)
# skip it if it exists
if os.path.isfile(fn):
continue
new_files.append(fn)
with open(fn, 'w') as f:
doc = get_documenter(obj, parent)
if template_name is not None:
template = template_env.get_template(template_name)
else:
try:
template = template_env.get_template('autosummary/%s.rst'
% doc.objtype)
except TemplateNotFound:
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[], imported=False):
# type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
items = [] # type: List[unicode]
for name in dir(obj):
try:
value = safe_getattr(obj, name)
except AttributeError:
continue
documenter = get_documenter(value, obj)
if documenter.objtype == typ:
if imported or getattr(value, '__module__', None) == obj.__name__:
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
return public, items
ns = {} # type: Dict[unicode, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
ns['functions'], ns['all_functions'] = \
get_members(obj, 'function', imported=imported_members)
ns['classes'], ns['all_classes'] = \
get_members(obj, 'class', imported=imported_members)
ns['exceptions'], ns['all_exceptions'] = \
get_members(obj, 'exception', imported=imported_members)
elif doc.objtype == 'class':
ns['members'] = dir(obj)
ns['methods'], ns['all_methods'] = \
get_members(obj, 'method', ['__init__'], imported=imported_members)
ns['attributes'], ns['all_attributes'] = \
get_members(obj, 'attribute', imported=imported_members)
parts = name.split('.')
if doc.objtype in ('method', 'attribute'):
mod_name = '.'.join(parts[:-2])
cls_name = parts[-2]
obj_name = '.'.join(parts[-2:])
ns['class'] = cls_name
else:
mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
ns['fullname'] = name
ns['module'] = mod_name
ns['objname'] = obj_name
ns['name'] = parts[-1]
ns['objtype'] = doc.objtype
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
f.write(rendered) # type: ignore
# descend recursively to new files
if new_files:
generate_autosummary_docs(new_files, output_dir=output_dir,
suffix=suffix, warn=warn, info=info,
base_path=base_path, builder=builder,
template_dir=template_dir)
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
# type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented = [] # type: List[Tuple[unicode, unicode, unicode]]
for filename in filenames:
with codecs.open(filename, 'r', encoding='utf-8', # type: ignore
errors='ignore') as f:
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, # type: ignore
filename=filename))
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
# type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
"""
try:
real_name, obj, parent, modname = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError as e:
print("Failed to import '%s': %s" % (name, e))
except SystemExit as e:
print("Failed to import '%s'; the module executes module level "
"statement and it might call sys.exit()." % name)
return []
def find_autosummary_in_lines(lines, module=None, filename=None):
# type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items appear in autosummary:: directives in the
given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
autosummary directive (relative to the root of the file name), and
*template* the value of the :template: option. *toctree* and
*template* ``None`` if the directive does not have the
corresponding options set.
"""
autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
automodule_re = re.compile(
r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
module_re = re.compile(
r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
documented = [] # type: List[Tuple[unicode, unicode, unicode]]
toctree = None # type: unicode
template = None
current_module = module
in_autosummary = False
base_indent = ""
for line in lines:
if in_autosummary:
m = toctree_arg_re.match(line) # type: ignore
if m:
toctree = m.group(1)
if filename:
toctree = os.path.join(os.path.dirname(filename),
toctree)
continue
m = template_arg_re.match(line) # type: ignore
if m:
template = m.group(1).strip()
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line) # type: ignore
if m:
name = m.group(1).strip()
if name.startswith('~'):
name = name[1:]
if current_module and \
not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.append((name, toctree, template))
continue
if not line.strip() or line.startswith(base_indent + " "):
continue
in_autosummary = False
m = autosummary_re.match(line) # type: ignore
if m:
in_autosummary = True
base_indent = m.group(1)
toctree = None
template = None
continue
m = automodule_re.search(line) # type: ignore
if m:
current_module = m.group(1).strip()
# recurse into the automodule docstring
documented.extend(find_autosummary_in_docstring(
current_module, filename=filename))
continue
m = module_re.match(line) # type: ignore
if m:
current_module = m.group(2)
continue
return documented
if __name__ == '__main__':
main()
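# Standalone sketch (run separately, not appended to this module): shows the tuple format
# returned by find_autosummary_in_lines for a tiny in-memory document.
from sphinx.ext.autosummary.generate import find_autosummary_in_lines

doc_lines = [
    ".. autosummary::",
    "   :toctree: generated/",
    "",
    "   mypkg.helper",
]
print(find_autosummary_in_lines(doc_lines))
# expected roughly: [('mypkg.helper', 'generated/', None)]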
| 37.123626
| 114
| 0.577962
|
270f8d2368ef1ec1d66b4b1d096d66f743620362
| 6,613
|
py
|
Python
|
angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
|
zhu8655/angr
|
c565292a2dd75a0eb77fad74a6b6dd2656216b1f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
|
zhu8655/angr
|
c565292a2dd75a0eb77fad74a6b6dd2656216b1f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
|
zhu8655/angr
|
c565292a2dd75a0eb77fad74a6b6dd2656216b1f
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import pyvex
import archinfo
from .... import options, BP_BEFORE
from ....blade import Blade
from ....annocfg import AnnotatedCFG
from ....exploration_techniques import Slicecutor
from .resolver import IndirectJumpResolver
l = logging.getLogger(name=__name__)
class OverwriteTmpValueCallback:
def __init__(self, gp_value):
self.gp_value = gp_value
def overwrite_tmp_value(self, state):
state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)
class MipsElfFastResolver(IndirectJumpResolver):
def __init__(self, project):
super(MipsElfFastResolver, self).__init__(project, timeless=True)
def filter(self, cfg, addr, func_addr, block, jumpkind):
if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):
return False
return True
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:param str jumpkind: The jumpkind.
:return: If it was resolved and targets alongside it
:rtype: tuple
"""
project = self.project
b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,
ignored_regs=('gp',), cross_insn_opt=False,
)
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
if not sources:
return False, []
source = sources[0]
source_addr = source[0]
annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)
annotated_cfg.from_digraph(b.slice)
state = project.factory.blank_state(addr=source_addr, mode="fastpath",
remove_options=options.refs,
# suppress unconstrained stack reads for `gp`
add_options={options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
options.NO_CROSS_INSN_OPT
},
)
state.regs._t9 = func_addr
func = cfg.kb.functions.function(addr=func_addr)
gp_offset = project.arch.registers['gp'][0]
# see if gp is used at all
for stmt in project.factory.block(addr, cross_insn_opt=False).vex.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp) \
and isinstance(stmt.data, pyvex.IRExpr.Get) \
and stmt.data.offset == gp_offset:
gp_used = True
break
else:
gp_used = False
gp_value = None
if gp_used:
if 'gp' not in func.info:
# this might be a special case: gp is only used once in this function, and it can be initialized right
# before its use site.
# however, it should have been determined in CFGFast
# cannot determine the value of gp. quit
pass
else:
gp_value = func.info['gp']
if gp_value is None:
l.warning('Failed to determine value of register gp for function %#x.', func.addr)
return False, []
# Special handling for cases where `gp` is stored on the stack
self._set_gp_load_callback(state, b, project, gp_offset, gp_value)
state.regs._gp = gp_value
simgr = self.project.factory.simulation_manager(state)
simgr.use_technique(Slicecutor(annotated_cfg))
simgr.run()
if simgr.cut:
# pick the successor that is cut right after executing `addr`
try:
target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))
except StopIteration:
l.debug("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
return False, [ ]
target = target_state.addr
if self._is_target_valid(cfg, target):
l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target)
return True, [ target ]
l.debug("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target)
return False, [ ]
l.debug("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
return False, [ ]
@staticmethod
def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):
got_gp_stack_store = False
tmps = {}
for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):
for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):
# Load from memory to a tmp - assuming it's loading from the stack
tmps[stmt.tmp] = 'stack'
elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:
if isinstance(stmt.data, pyvex.IRExpr.RdTmp):
tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop
if tmps.get(tmp_offset, None) == 'stack':
# found the load from stack
# we must make sure value of that temporary variable equals to the correct gp value
state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,
condition=lambda s, bbl_addr_=block_addr_in_slice,
tmp_offset_=tmp_offset:
s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,
action=OverwriteTmpValueCallback(
gp_value).overwrite_tmp_value
)
got_gp_stack_store = True
break
if got_gp_stack_store:
break
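# Hedged usage sketch: CFGFast normally picks this resolver up through angr's default
# indirect-jump resolvers, so building a CFG on a MIPS ELF binary is enough to exercise it
# ("/path/to/mips_binary" is a placeholder path).
import angr

proj = angr.Project("/path/to/mips_binary", auto_load_libs=False)
cfg = proj.analyses.CFGFast()  # MipsElfFastResolver runs while indirect jumps are resolved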
| 43.222222
| 133
| 0.553909
|
907fe77f7a1362a5ad723c5b0dadaf065b6a3bfb
| 562
|
py
|
Python
|
inventories/migrations/0025_auto_20200602_1821.py
|
amado-developer/ReadHub-RestfulAPI
|
8d8b445c4a84810d52bbf78a2593e0b48351590c
|
[
"MIT"
] | null | null | null |
inventories/migrations/0025_auto_20200602_1821.py
|
amado-developer/ReadHub-RestfulAPI
|
8d8b445c4a84810d52bbf78a2593e0b48351590c
|
[
"MIT"
] | 7
|
2021-03-19T03:09:53.000Z
|
2022-01-13T02:48:44.000Z
|
inventories/migrations/0025_auto_20200602_1821.py
|
amado-developer/ReadHub-RestfulAPI
|
8d8b445c4a84810d52bbf78a2593e0b48351590c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-06-02 18:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('digital_books', '0008_digital_book_rating'),
('inventories', '0024_auto_20200602_1450'),
]
operations = [
migrations.AlterField(
model_name='inventory',
name='digital_book',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='digital_books.Digital_Book'),
),
]
| 26.761905
| 125
| 0.660142
|
0c9f5aa187e4da721d1293488c5624083106b2f9
| 803
|
py
|
Python
|
ProgettiHWSW/api.py
|
ArdaSeremet/progettihwsw
|
565d1fb35d88d3e7c272c03b8a190231179cce74
|
[
"MIT"
] | 1
|
2020-08-28T21:46:12.000Z
|
2020-08-28T21:46:12.000Z
|
ProgettiHWSW/api.py
|
ArdaSeremet/progettihwsw
|
565d1fb35d88d3e7c272c03b8a190231179cce74
|
[
"MIT"
] | null | null | null |
ProgettiHWSW/api.py
|
ArdaSeremet/progettihwsw
|
565d1fb35d88d3e7c272c03b8a190231179cce74
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Arda Seremet <ardaseremet@outlook.com>
import aiohttp
import async_timeout
from asyncio import TimeoutError
class API:
"""Class to interact with the API of ProgettiHWSW boards."""
def __init__(self, ip: str):
"""Initialize the API."""
self.ip = ip
async def request(self, path: str):
try:
with async_timeout.timeout(5):
async with aiohttp.request("GET", f"{self.ip}/{path}") as resp:
return await resp.text()
except TimeoutError:
return False
async def execute(self, code: int):
"""Make requests with API codes for boards."""
try:
return await self.request(f"index.htm?execute={code}")
except Exception:
return False
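# Minimal usage sketch; the board address and API code below are placeholders, not values
# taken from the library's documentation.
import asyncio

async def demo():
    board = API("http://192.168.1.10")
    print(await board.execute(1))

asyncio.run(demo())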
| 28.678571
| 79
| 0.596513
|
713b7f3e57676d34a8da9bfc4eb05c205744f2f1
| 1,007
|
py
|
Python
|
infinity/apps/infinite_redis/manager.py
|
ra101/Django-Infinity
|
9fd17c3c27e1d9f4c1796007b7dc053857edd294
|
[
"MIT"
] | null | null | null |
infinity/apps/infinite_redis/manager.py
|
ra101/Django-Infinity
|
9fd17c3c27e1d9f4c1796007b7dc053857edd294
|
[
"MIT"
] | null | null | null |
infinity/apps/infinite_redis/manager.py
|
ra101/Django-Infinity
|
9fd17c3c27e1d9f4c1796007b7dc053857edd294
|
[
"MIT"
] | null | null | null |
import redis
from django.conf import settings
from django.db.models.manager import BaseManager
from apps.infinite_redis.query import RedisQuerySet
# Redis DB
redis_ins = redis.StrictRedis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=0,
password=settings.REDIS_PASSWORD,
)
class RedisModelManager(BaseManager.from_queryset(RedisQuerySet)):
"""
Since Our Model is abstract so all operations will be performed from here
by using from_queryset we are adding all the methods of RedisQuerySet
to this manager class
"""
def __init__(self):
super().__init__()
self._db = redis_ins
def bulk_upsert(self, data_dict):
"""bulk upsert using mset which takes in dict"""
self._db.mset(data_dict)
def save(self, instance, value):
"""custom save to update db"""
self._db.set(instance.key, value)
def delete(self, instance):
"""custom delete to update db"""
self._db.delete(instance.key)
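# Hedged sketch: bulk_upsert only touches the Redis connection, so the manager can be
# exercised directly (assumes Django settings with the REDIS_* values are configured;
# the keys and values below are made up).
manager = RedisModelManager()
manager.bulk_upsert({"greeting": "hello", "count": "3"})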
| 25.175
| 77
| 0.691162
|
56967a27884fdbc2a5fdcd117536785cb24d59ef
| 1,711
|
py
|
Python
|
app.py
|
minggli/mnist-dcgan
|
4315776c79745b0578b3601258d539821779dce1
|
[
"MIT"
] | 4
|
2018-11-22T09:44:35.000Z
|
2020-09-21T07:12:04.000Z
|
app.py
|
minggli/mnist-dcgan
|
4315776c79745b0578b3601258d539821779dce1
|
[
"MIT"
] | 19
|
2019-06-01T18:40:26.000Z
|
2022-03-11T23:13:33.000Z
|
app.py
|
minggli/mnist-dcgan
|
4315776c79745b0578b3601258d539821779dce1
|
[
"MIT"
] | 1
|
2019-05-23T02:00:51.000Z
|
2019-05-23T02:00:51.000Z
|
#!/usr/bin/env python
"""
app
flask app serving external client calls.
"""
import logging
from random import randint
from flask import Flask
from flask_restplus import Api, Namespace, Resource
from helper import _validate_integer
from serving import grpc_generate
from config import APP_CONFIG
logger = logging.getLogger(__name__)
application = Flask(__name__)
application.config.update(APP_CONFIG)
ns = Namespace('generate', description='Generate images.')
api = Api(
title='Wasserstein GAN',
version='1.0',
description='Generate images using GANs')
@ns.route('/', defaults={'digit': None})
@ns.route('/<int:digit>')
@ns.doc(params={'digit': '0-9 single integer'})
class Generate(Resource):
def get(self, digit):
if _validate_integer(digit) is None:
digit = randint(0, 9)
logger.info(f"request received to generate {digit}.")
img = grpc_generate(digit)
logger.info("image generated successfully.")
return img
api.add_namespace(ns)
api.init_app(application)
# TODO Wasserstein loss doesn't discriminate real from fake images like the original
# GAN loss function does; more work is needed to reuse the Critic/Discriminator
# to classify generated images. The loss function must include a supervised element.
# @app.route('/predict', methods=['GET', 'POST'])
# def predict():
# if request.method == 'POST':
# if 'file' not in request.files:
# return redirect(request.url)
# f = request.files['file']
# prob = grpc_predict(f)
# return render_template('predict.html', result=prob)
# return render_template('predict.html')
if __name__ == '__main__':
application.run(host='0.0.0.0', port='8000', debug=True)
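# Quick smoke test with Flask's test client (a sketch; it still needs the TF Serving
# backend behind grpc_generate to be reachable, otherwise the call will fail).
with application.test_client() as client:
    resp = client.get("/generate/3")
    print(resp.status_code)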
| 29
| 77
| 0.694915
|
eca8f8a33b98426ff2cb4a71dbc4e040dd68b439
| 1,017
|
py
|
Python
|
2015/day05/python/test_part1.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
2015/day05/python/test_part1.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
2015/day05/python/test_part1.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
from part1 import compute_answer
def test_compute_answer_sample_1():
lines = ["ugknbfddgicrmopn"]
expected = 1
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_2():
lines = ["aaa"]
expected = 1
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_3():
lines = ["jchzalrnumimnmhp"]
expected = 0
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_4():
lines = ["haegwjzuvuyypxyu"]
expected = 0
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_5():
lines = ["dvszwmarrgswjxmb"]
expected = 0
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 258
actual = compute_answer(lines)
assert actual == expected
| 22.108696
| 61
| 0.684366
|
25d23dca41f3848c0acde06b08cf5574d38495b0
| 1,505
|
py
|
Python
|
setup.py
|
VENULLLC/updater4pyi
|
8d55dd51160ab7c76ebf0bccffab8293668074b3
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
VENULLLC/updater4pyi
|
8d55dd51160ab7c76ebf0bccffab8293668074b3
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
VENULLLC/updater4pyi
|
8d55dd51160ab7c76ebf0bccffab8293668074b3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import os.path
#from setuptools import setup
from setuptools import setup
import updater4pyi.upd_version
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(name='updater4pyi',
version=updater4pyi.upd_version.version_str,
description='Lightweight library for software auto-update for applications frozen with pyinstaller',
long_description=read('README.md'),
author='Philippe Faist',
# obfuscate e-mail in source script, will be in clear in the package
author_email=("".join([chr(ord(x)+1) for x in 'oghkhood-e`hrs?aktdvhm-bg'])),
url='https://github.com/phfaist/updater4pyi/',
license='BSD',
packages=['updater4pyi'],
package_data={'updater4pyi': [ 'cacert.pem', 'installers/unix/*.sh', 'installers/win/do_install.exe.zip' ]},
py_modules=[],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
],
)
| 35.833333
| 114
| 0.630565
|
be57a9377780dcc62681525cf4cbfef45857ce03
| 1,296
|
py
|
Python
|
dataactcore/scripts/database_setup.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
dataactcore/scripts/database_setup.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 3
|
2021-08-22T11:47:45.000Z
|
2022-03-29T22:06:49.000Z
|
dataactcore/scripts/database_setup.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2020-07-17T23:50:56.000Z
|
2020-07-17T23:50:56.000Z
|
import sqlalchemy_utils
import logging
from dataactcore.config import ALEMBIC_PATH, MIGRATION_PATH
from alembic.config import Config
from alembic import command
from sqlalchemy.exc import ProgrammingError
from dataactcore.interfaces.db import db_uri
def create_database(db_name):
"""Create specified database if it doesn't exist."""
connect_string = db_uri(db_name)
if not sqlalchemy_utils.database_exists(connect_string):
sqlalchemy_utils.create_database(connect_string)
def drop_database(db_name):
"""Drop specified database."""
connect_string = db_uri(db_name)
if sqlalchemy_utils.database_exists(connect_string):
sqlalchemy_utils.drop_database(connect_string)
def run_migrations():
"""Run Alembic migrations for a specific database/model set."""
logging.disable(logging.WARN)
alembic_cfg = Config(ALEMBIC_PATH)
alembic_cfg.set_main_option("script_location", MIGRATION_PATH)
try:
command.upgrade(alembic_cfg, "head")
except ProgrammingError as e:
if "relation" and "already exists" in str(e):
raise Exception("Cannot run initial db migration if tables "
"already exist. " + str(e))
else:
raise
finally:
logging.disable(logging.NOTSET)
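# Hedged usage sketch; "data_broker" is a placeholder database name, not necessarily the
# one this project uses.
if __name__ == "__main__":
    create_database("data_broker")
    run_migrations()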
| 33.230769
| 72
| 0.722222
|
13769a1470b344343e6028fda1a9aa7c060b8fc0
| 219
|
py
|
Python
|
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
width = 5.3
height = 3.67
triangle_area = (width * height) / 2
print("Pole trójkąta wynosi {triangle_area} cm^2")
print(f"Pole trójkąta wynosi {triangle_area} cm^2")
print("Pole trójkąta wynosi", triangle_area, "cm^2")
| 31.285714
| 52
| 0.726027
|
69eccf7fc67f3d44635e4245ed3700455444ea39
| 11,546
|
py
|
Python
|
google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
galz10/python-dialogflow-cx
|
e24bdfd499952199dfbdaa5634061653da8ae1db
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
galz10/python-dialogflow-cx
|
e24bdfd499952199dfbdaa5634061653da8ae1db
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
galz10/python-dialogflow-cx
|
e24bdfd499952199dfbdaa5634061653da8ae1db
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3beta1",
manifest={
"GetSecuritySettingsRequest",
"UpdateSecuritySettingsRequest",
"ListSecuritySettingsRequest",
"ListSecuritySettingsResponse",
"CreateSecuritySettingsRequest",
"DeleteSecuritySettingsRequest",
"SecuritySettings",
},
)
class GetSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.GetSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.GetSecuritySettings].
Attributes:
name (str):
Required. Resource name of the settings. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<security settings ID>``.
"""
name = proto.Field(proto.STRING, number=1,)
class UpdateSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.UpdateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.UpdateSecuritySettings].
Attributes:
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. [SecuritySettings] object that contains values for
each of the fields to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields
get updated. If the mask is not present, all
fields will be updated.
"""
security_settings = proto.Field(
proto.MESSAGE, number=1, message="SecuritySettings",
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class ListSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.ListSecuritySettings][].
Attributes:
parent (str):
Required. The location to list all security settings for.
Format: ``projects/<Project ID>/locations/<Location ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class ListSecuritySettingsResponse(proto.Message):
r"""The response message for [SecuritySettings.ListSecuritySettings][].
Attributes:
security_settings (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings]):
The list of security settings.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
security_settings = proto.RepeatedField(
proto.MESSAGE, number=1, message="SecuritySettings",
)
next_page_token = proto.Field(proto.STRING, number=2,)
class CreateSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.CreateSecuritySettings][].
Attributes:
parent (str):
Required. The location to create an
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
for. Format:
``projects/<Project ID>/locations/<Location ID>``.
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. The security settings to create.
"""
parent = proto.Field(proto.STRING, number=1,)
security_settings = proto.Field(
proto.MESSAGE, number=2, message="SecuritySettings",
)
class DeleteSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.DeleteSecuritySettings][].
Attributes:
name (str):
Required. The name of the
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
"""
name = proto.Field(proto.STRING, number=1,)
class SecuritySettings(proto.Message):
r"""Represents the settings related to security issues, such as
data redaction and data retention. It may take hours for updates
on the settings to propagate to all the related components and
take effect.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
name (str):
Resource name of the settings. Required for the
[SecuritySettingsService.UpdateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.UpdateSecuritySettings]
method.
[SecuritySettingsService.CreateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.CreateSecuritySettings]
populates the name automatically. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
display_name (str):
Required. The human-readable name of the
security settings, unique within the location.
redaction_strategy (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionStrategy):
Strategy that defines how we do redaction.
redaction_scope (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionScope):
Defines the data for which Dialogflow applies
redaction. Dialogflow does not redact data that
it does not have access to – for example, Cloud
logging.
inspect_template (str):
`DLP <https://cloud.google.com/dlp/docs>`__ inspect template
name. Use this template to define inspect base settings.
The ``DLP Inspect Templates Reader`` role is needed on the
Dialogflow service identity service account (has the form
``service-PROJECT_NUMBER@gcp-sa-dialogflow.iam.gserviceaccount.com``)
for your agent's project.
If empty, we use the default DLP inspect config.
The template name will have one of the following formats:
``projects/<Project ID>/locations/<Location ID>/inspectTemplates/<Template ID>``
OR
``organizations/<Organization ID>/locations/<Location ID>/inspectTemplates/<Template ID>``
Note: ``inspect_template`` must be located in the same
region as the ``SecuritySettings``.
deidentify_template (str):
`DLP <https://cloud.google.com/dlp/docs>`__ deidentify
template name. Use this template to define de-identification
configuration for the content.
The ``DLP De-identify Templates Reader`` role is needed on
the Dialogflow service identity service account (has the
form
``service-PROJECT_NUMBER@gcp-sa-dialogflow.iam.gserviceaccount.com``)
for your agent's project.
If empty, Dialogflow replaces sensitive info with
``[redacted]`` text.
The template name will have one of the following formats:
``projects/<Project ID>/locations/<Location ID>/deidentifyTemplates/<Template ID>``
OR
``organizations/<Organization ID>/locations/<Location ID>/deidentifyTemplates/<Template ID>``
Note: ``deidentify_template`` must be located in the same
region as the ``SecuritySettings``.
retention_window_days (int):
Retains data in interaction logging for the
specified number of days. This does not apply to
Cloud logging, which is owned by the user - not
Dialogflow.
User must set a value lower than Dialogflow's
default 365d TTL. Setting a value higher than
that has no effect.
A missing value or setting to 0 also means we
use Dialogflow's default TTL.
Note: Interaction logging is a limited access
feature. Talk to your Google representative to
check availability for you.
This field is a member of `oneof`_ ``data_retention``.
purge_data_types (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.PurgeDataType]):
List of types of data to remove when
retention settings triggers purge.
insights_export_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.InsightsExportSettings):
Controls conversation exporting settings to Insights after
conversation is completed.
If
[retention_strategy][google.cloud.dialogflow.cx.v3beta1.SecuritySettings.retention_strategy]
is set to REMOVE_AFTER_CONVERSATION, Insights export is
disabled no matter what you configure here.
"""
class RedactionStrategy(proto.Enum):
r"""Defines how we redact data."""
REDACTION_STRATEGY_UNSPECIFIED = 0
REDACT_WITH_SERVICE = 1
class RedactionScope(proto.Enum):
r"""Defines what types of data to redact."""
REDACTION_SCOPE_UNSPECIFIED = 0
REDACT_DISK_STORAGE = 2
class PurgeDataType(proto.Enum):
r"""Type of data we purge after retention settings triggers
purge.
"""
PURGE_DATA_TYPE_UNSPECIFIED = 0
DIALOGFLOW_HISTORY = 1
class InsightsExportSettings(proto.Message):
r"""Settings for exporting conversations to
`Insights <https://cloud.google.com/dialogflow/priv/docs/insights>`__.
Attributes:
enable_insights_export (bool):
If enabled, we will automatically exports
conversations to Insights and Insights runs its
analyzers.
"""
enable_insights_export = proto.Field(proto.BOOL, number=1,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
redaction_strategy = proto.Field(proto.ENUM, number=3, enum=RedactionStrategy,)
redaction_scope = proto.Field(proto.ENUM, number=4, enum=RedactionScope,)
inspect_template = proto.Field(proto.STRING, number=9,)
deidentify_template = proto.Field(proto.STRING, number=17,)
retention_window_days = proto.Field(proto.INT32, number=6, oneof="data_retention",)
purge_data_types = proto.RepeatedField(proto.ENUM, number=8, enum=PurgeDataType,)
insights_export_settings = proto.Field(
proto.MESSAGE, number=13, message=InsightsExportSettings,
)
__all__ = tuple(sorted(__protobuf__.manifest))
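# Illustrative construction of a SecuritySettings message using only fields defined above;
# the project, location and settings IDs are placeholders.
settings = SecuritySettings(
    name="projects/my-project/locations/global/securitySettings/my-settings",
    display_name="default security settings",
    retention_window_days=30,
)
print(settings.display_name)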
| 40.798587
| 143
| 0.67911
|
01d252b4fd98b46982c886e8e0d350121739c6d9
| 6,079
|
py
|
Python
|
hwp_9/Les_9_1.py
|
drednout5786/Python-UII
|
2c3ea3884dfdc9fbb974e515b9736207f18bd1f4
|
[
"MIT"
] | null | null | null |
hwp_9/Les_9_1.py
|
drednout5786/Python-UII
|
2c3ea3884dfdc9fbb974e515b9736207f18bd1f4
|
[
"MIT"
] | 1
|
2019-12-19T11:22:12.000Z
|
2019-12-19T11:22:12.000Z
|
hwp_9/Les_9_1.py
|
drednout5786/Python-UII
|
2c3ea3884dfdc9fbb974e515b9736207f18bd1f4
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import zconfint
from scipy import stats
from statistics import mode
class EBM: # Evidence-based medicine (EBM)
'''
Analysis of a single variable.
'''
def __init__(self, file, p_val): # Инициализация
self.file = file
self.p_val = p_val
def get_data_xlsx(self): # Считывание файла с данными
try:
self.df = pd.read_excel(self.file)
if (self.df.empty):
print("Файл '{}' пуст".format(self.file))
return False
else:
print("Файл '{}' загружен.".format(self.file))
print("Структура файла: \n", self.df.head(5))
return True
except (IOError, Exception) as e:
print(e)
#raise Exception(f'castom error{e}')
return False
def get_file_info(self): # Краткое описание файла
print("\nКраткое описание файла:")
print("Количество строк: {} Количество столбцов: {}".format(self.df.shape[0], self.df.shape[1]))
#print(f'Количество строк: {self.df.shape[0]} Количество столбцов: {self.df.shape[1]}')
def get_column_name(self): # Вопрос пользователю - какой столбец анализируем?
self.customer_choice = input('Введите название столбца, данные из которого хотите проанализировать?: {} \n'
'или 0, чтобы отказаться от выбора и окончить выполнение анализа.\n'.format(self.df.columns.tolist()))
while (self.customer_choice != "0") and (self.customer_choice not in list(self.df)):
print("Вы выбрали столбец: {}. Его нет в списке столбцов.".format(self.customer_choice))
self.customer_choice = input('Введите название столбца, данные из которого хотите проанализировать?: {} \n'
'или 0, чтобы отказаться от выбора и окончить выполнение анализа.\n'.format(self.df.columns.tolist()))
return self.customer_choice
def scale_type(self, var): # Определение типа шкалы переменной
if self.df[var].dtype in ['int8', 'int16', 'int32', 'int64']:
# print("Данные распределены по порядковой шкале.")
self.__scale_t = 'порядковая'
elif self.df[var].dtype in ['float32', 'float64']:
# print("Данные распределены по количественной шкале.")
self.__scale_t = 'количественная'
elif self.df[var].dtype in ['O']:
if type(self.df[var][0]) == np.str:
# print("Данные распределены по номинальной (категориальной) шкале.")
self.__scale_t = 'номинальная'
else:
# print("Данные распределены по неопределенной шкале.")
self.__scale_t = 'не определен'
else:
# print("Данные распределены по неопределенной шкале.")
self.__scale_t = 'не определен'
return self.__scale_t
def describe_value(self): # Краткое описание переменной
print("Анализ - Краткое описание переменной")
if (self.__scale_t == 'порядковая') or (self.__scale_t == 'количественная'):
print(self.df[self.customer_choice].describe())
else:
print(self.df[self.customer_choice].groupby(self.df[self.customer_choice]).count())
def normality_test(self, var): # Проверка на нормальность распределения
if self.num_var == "2" :
self.__scale_t = self.scale_type(var)
if (self.__scale_t == 'порядковая') or (self.__scale_t == 'количественная'):
SW = stats.shapiro(self.df[var])
if SW[1] > self.p_val:
self.__norm = 1
print("Тест Шапиро-Уилка на нормальность распределения: W = {:.6}, p = {:f}. Вывод: распределение нормально.".format(SW[0], SW[1]))
print('Среднее: {:.4} и 95% доверительный интервал: [{:.4}, {:.4}]'.format(np.mean(self.df[var], axis = 0), zconfint(self.df[var])[0],
zconfint(self.df[var])[1]))
else:
self.__norm = 0
print("Тест Шапиро-Уилка на нормальность распределения: W = {:.6}, p = {:f}. Вывод: распределение НЕ нормально.".format(SW[0], SW[1]))
else:
print("Данные не имеют количественной природы. Проверка на нормальность не требуется.")
self.__norm = -1
return self.__norm
def typical_value(self): # Расчет типичного значения выборки
if (self.__scale_t == 'порядковая') or (self.__scale_t == 'количественная'):
if self.__norm == 0:
q25, q50, q75 = np.percentile(self.df[self.customer_choice], [25, 50, 75])
print("Типичное значение выборки Медиана [Q1; Q3] = {} [{}; {}].".format(round(q50, 2), round(q25, 2), round(q75, 2)))
else:
print("Типичное значение выборки Среднее ± стандартное отклонение = {} ± {}.".format(round(np.mean(self.df[self.customer_choice], axis = 0), 2),
round(np.std(self.df[self.customer_choice], axis = 0), 2)))
elif self.__scale_t == 'номинальная':
print("Типичное значение выборки Мода = {}.".format(mode(self.df[self.customer_choice])))
if __name__ == '__main__':
EBM_easy = EBM("./для ЕВ.xlsx", 0.05)
if EBM_easy.get_data_xlsx(): # Считывание файла с данными
EBM_easy.get_file_info() # Краткое описание файла
while (EBM_easy.get_column_name() != '0'): # Вопрос пользователю - какой столбец анализируем?
EBM_easy.scale_type(EBM_easy.customer_choice) # Определение типа шкалы переменной
EBM_easy.describe_value() # Краткое описание переменной
EBM_easy.normality_test(EBM_easy.customer_choice) # Проверка на нормальность распределения
EBM_easy.typical_value() # Расчет типичного значения выборки
print('\nРабота программы по анализу данных закончена!')
| 54.276786
| 160
| 0.598454
|
1c52168f5a1830aeb1076b8be3de36d36b620de3
| 9,769
|
py
|
Python
|
Pyrado/scripts/hyperparam_optimization/hopt_qq-su_simopt-cem.py
|
KhanhThiVo/SimuRLacra
|
fdeaf2059c2ed80ea696f018c29290510b5c4cb9
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null |
Pyrado/scripts/hyperparam_optimization/hopt_qq-su_simopt-cem.py
|
KhanhThiVo/SimuRLacra
|
fdeaf2059c2ed80ea696f018c29290510b5c4cb9
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null |
Pyrado/scripts/hyperparam_optimization/hopt_qq-su_simopt-cem.py
|
KhanhThiVo/SimuRLacra
|
fdeaf2059c2ed80ea696f018c29290510b5c4cb9
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | 1
|
2020-11-24T15:25:26.000Z
|
2020-11-24T15:25:26.000Z
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Optimize the hyper-parameters of SimOpt for the Quanser Qube swing-up task.
"""
import functools
import optuna
import os.path as osp
import torch as to
import pyrado
from pyrado.algorithms.step_based.gae import GAE
from pyrado.algorithms.episodic.cem import CEM
from pyrado.algorithms.step_based.ppo import PPO
from pyrado.algorithms.meta.simopt import SimOpt
from pyrado.algorithms.episodic.sysid_via_episodic_rl import SysIdViaEpisodicRL
from pyrado.domain_randomization.domain_parameter import NormalDomainParam
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive, MetaDomainRandWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.logger.experiment import save_dicts_to_yaml, setup_experiment
from pyrado.logger.step import create_csv_step_logger
from pyrado.policies.special.domain_distribution import DomainDistrParamPolicy
from pyrado.policies.special.environment_specific import QQubeSwingUpAndBalanceCtrl
from pyrado.policies.feed_forward.fnn import FNNPolicy
from pyrado.sampling.parallel_rollout_sampler import ParallelRolloutSampler
from pyrado.spaces import ValueFunctionSpace
from pyrado.utils.argparser import get_argparser
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import print_cbt
def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int):
"""
Objective function for the Optuna `Study` to maximize.
.. note::
Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
:param trial: Optuna Trial object for hyper-parameter optimization
:param study_dir: the parent directory for all trials in this study
:param seed: seed value for the random number generators, pass `None` for no seeding
:return: objective function value
"""
# Synchronize seeds between Optuna trials
pyrado.set_seed(seed)
# Environments
env_hparams = dict(dt=1 / 100.0, max_steps=600)
env_real = QQubeSwingUpSim(**env_hparams)
env_real.domain_param = dict(
Mr=0.095 * 0.9, # 0.095*0.9 = 0.0855
Mp=0.024 * 1.1, # 0.024*1.1 = 0.0264
Lr=0.085 * 0.9, # 0.085*0.9 = 0.0765
Lp=0.129 * 1.1, # 0.129*1.1 = 0.1419
)
env_sim = QQubeSwingUpSim(**env_hparams)
randomizer = DomainRandomizer(
NormalDomainParam(name="Mr", mean=0.0, std=1e6, clip_lo=1e-3),
NormalDomainParam(name="Mp", mean=0.0, std=1e6, clip_lo=1e-3),
NormalDomainParam(name="Lr", mean=0.0, std=1e6, clip_lo=1e-3),
NormalDomainParam(name="Lp", mean=0.0, std=1e6, clip_lo=1e-3),
)
env_sim = DomainRandWrapperLive(env_sim, randomizer)
dp_map = {
0: ("Mr", "mean"),
1: ("Mr", "std"),
2: ("Mp", "mean"),
3: ("Mp", "std"),
4: ("Lr", "mean"),
5: ("Lr", "std"),
6: ("Lp", "mean"),
7: ("Lp", "std"),
}
trafo_mask = [True] * 8
env_sim = MetaDomainRandWrapper(env_sim, dp_map)
# Subroutine for policy improvement
behav_policy_hparam = dict(hidden_sizes=[64, 64], hidden_nonlin=to.tanh)
behav_policy = FNNPolicy(spec=env_sim.spec, **behav_policy_hparam)
vfcn_hparam = dict(hidden_sizes=[64, 64], hidden_nonlin=to.tanh)
vfcn = FNNPolicy(spec=EnvSpec(env_sim.obs_space, ValueFunctionSpace), **vfcn_hparam)
critic_hparam = dict(
gamma=0.9885,
lamda=0.9648,
num_epoch=2,
batch_size=500,
standardize_adv=False,
lr=5.792e-4,
max_grad_norm=1.0,
)
critic = GAE(vfcn, **critic_hparam)
subrtn_policy_hparam = dict(
max_iter=200,
min_steps=3 * 23 * env_sim.max_steps,
num_epoch=7,
eps_clip=0.0744,
batch_size=500,
std_init=0.9074,
lr=3.446e-04,
max_grad_norm=1.0,
num_workers=1,
)
subrtn_policy = PPO(study_dir, env_sim, behav_policy, critic, **subrtn_policy_hparam)
# Subroutine for system identification
prior_std_denom = trial.suggest_uniform("prior_std_denom", 5, 20)
prior = DomainRandomizer(
NormalDomainParam(name="Mr", mean=0.095, std=0.095 / prior_std_denom),
NormalDomainParam(name="Mp", mean=0.024, std=0.024 / prior_std_denom),
NormalDomainParam(name="Lr", mean=0.085, std=0.085 / prior_std_denom),
NormalDomainParam(name="Lp", mean=0.129, std=0.129 / prior_std_denom),
)
ddp_policy = DomainDistrParamPolicy(
mapping=dp_map,
trafo_mask=trafo_mask,
prior=prior,
scale_params=trial.suggest_categorical("ddp_policy_scale_params", [True, False]),
)
subsubrtn_distr_hparam = dict(
max_iter=trial.suggest_categorical("subsubrtn_distr_max_iter", [20]),
pop_size=trial.suggest_int("pop_size", 50, 500),
num_init_states_per_domain=1,
num_is_samples=trial.suggest_int("num_is_samples", 5, 20),
expl_std_init=trial.suggest_loguniform("expl_std_init", 1e-3, 1e-1),
expl_std_min=trial.suggest_categorical("expl_std_min", [1e-4]),
extra_expl_std_init=trial.suggest_loguniform("expl_std_init", 1e-3, 1e-1),
extra_expl_decay_iter=trial.suggest_int("extra_expl_decay_iter", 0, 10),
num_workers=1,
)
csv_logger = create_csv_step_logger(osp.join(study_dir, f"trial_{trial.number}"))
subsubrtn_distr = CEM(study_dir, env_sim, ddp_policy, **subsubrtn_distr_hparam, logger=csv_logger)
obs_vel_weight = trial.suggest_loguniform("obs_vel_weight", 1, 100)
subrtn_distr_hparam = dict(
metric=None,
obs_dim_weight=[1, 1, 1, 1, obs_vel_weight, obs_vel_weight],
num_rollouts_per_distr=trial.suggest_int("num_rollouts_per_distr", 20, 100),
num_workers=1,
)
subrtn_distr = SysIdViaEpisodicRL(subsubrtn_distr, behav_policy, **subrtn_distr_hparam)
# Algorithm
algo_hparam = dict(
max_iter=trial.suggest_categorical("algo_max_iter", [10]),
num_eval_rollouts=trial.suggest_categorical("algo_num_eval_rollouts", [5]),
warmstart=trial.suggest_categorical("algo_warmstart", [True]),
thold_succ_subrtn=trial.suggest_categorical("algo_thold_succ_subrtn", [50]),
subrtn_snapshot_mode="latest",
)
algo = SimOpt(study_dir, env_sim, env_real, subrtn_policy, subrtn_distr, **algo_hparam, logger=csv_logger)
# Jeeeha
    algo.train(seed=seed)
# Evaluate
min_rollouts = 1000
sampler = ParallelRolloutSampler(
env_real, algo.policy, num_workers=1, min_rollouts=min_rollouts
) # parallelize via optuna n_jobs
ros = sampler.sample()
mean_ret = sum([r.undiscounted_return() for r in ros]) / min_rollouts
return mean_ret
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
if args.dir is None:
ex_dir = setup_experiment(
"hyperparams", QQubeSwingUpSim.name, f"{SimOpt.name}-{CEM.name}_{QQubeSwingUpAndBalanceCtrl.name}_100Hz"
)
study_dir = osp.join(pyrado.TEMP_DIR, ex_dir)
print_cbt(f"Starting a new Optuna study.", "c", bright=True)
else:
study_dir = args.dir
if not osp.isdir(study_dir):
raise pyrado.PathErr(given=study_dir)
print_cbt(f"Continuing an existing Optuna study.", "c", bright=True)
name = f"{QQubeSwingUpSim.name}_{SimOpt.name}-{CEM.name}_{QQubeSwingUpAndBalanceCtrl.name}_100Hz"
study = optuna.create_study(
study_name=name,
storage=f"sqlite:////{osp.join(study_dir, f'{name}.db')}",
direction="maximize",
load_if_exists=True,
)
# Start optimizing
study.optimize(functools.partial(train_and_eval, study_dir=study_dir, seed=args.seed), n_trials=100, n_jobs=16)
# Save the best hyper-parameters
save_dicts_to_yaml(
study.best_params,
dict(seed=args.seed),
save_dir=study_dir,
file_name="best_hyperparams",
)
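# --- Editor's illustrative note (an assumption-laden sketch, not original code) ---
# The argparser above is only known here to expose `--dir` and `--seed` (read via
# `args.dir` and `args.seed`), so a study would typically be started or resumed as:
#
#   python hopt_qq-su_simopt-cem.py --seed 1                      # new study
#   python hopt_qq-su_simopt-cem.py --seed 1 --dir <study_dir>    # resume existing study
#
# Each of the 100 trials runs SimOpt end-to-end, so runs are long; n_jobs=16 above
# parallelizes Optuna trials, not the rollouts inside a single trial.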
| 43.035242
| 116
| 0.713481
|
88aba2aa01535de69744892927df030d6e4380c9
| 2,725
|
py
|
Python
|
tools/perf/contrib/vr_benchmarks/webvr_sample_pages.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
tools/perf/contrib/vr_benchmarks/webvr_sample_pages.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perf/contrib/vr_benchmarks/webvr_sample_pages.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.vr_benchmarks.vr_sample_page import VrSamplePage
from contrib.vr_benchmarks.vr_story_set import VrStorySet
class WebVrSamplePage(VrSamplePage):
def __init__(self, page_set, url_parameters, sample_page,
extra_browser_args=None,):
super(WebVrSamplePage, self).__init__(
sample_page=sample_page,
page_set=page_set,
url_parameters=url_parameters,
extra_browser_args=extra_browser_args)
def RunPageInteractions(self, action_runner):
action_runner.TapElement(selector='canvas[id="webgl-canvas"]')
action_runner.MeasureMemory(True)
# We don't want to be in VR or on a page with a WebGL canvas at the end of
# the test, as this generates unnecessary heat while the trace data is being
# processed, so navigate to a blank page if we're on a platform that cares
# about the heat generation.
if self._shared_page_state.ShouldNavigateToBlankPageBeforeFinishing():
action_runner.Navigate("about:blank")
class WebVrSamplePageSet(VrStorySet):
"""A page set using the official WebVR sample with settings tweaked."""
def __init__(self, use_fake_pose_tracker=True):
super(WebVrSamplePageSet, self).__init__(
use_fake_pose_tracker=use_fake_pose_tracker)
# Test cases that use the synthetic cube field page
cube_test_cases = [
# Standard sample app with no changes
['canvasClickPresents=1', 'renderScale=1'],
# Increased render scale
['canvasClickPresents=1', 'renderScale=1.5'],
# Default render scale, increased load
['canvasClickPresents=1', 'renderScale=1', 'heavyGpu=1', 'cubeScale=0.2',
'workTime=5'],
# Further increased load
['canvasClickPresents=1', 'renderScale=1', 'heavyGpu=1', 'cubeScale=0.3',
'workTime=10'],
# Absurd load for fill-rate testing
['canvasClickPresents=1', 'renderScale=1.6', 'heavyGpu=1',
'cubeScale=0.3', 'workTime=4'],
]
for url_parameters in cube_test_cases:
# Standard set of pages with defaults
self.AddStory(WebVrSamplePage(self, url_parameters, 'test-slow-render'))
# Set of pages with standardized render size
self.AddStory(WebVrSamplePage(self, url_parameters + ['standardSize=1'],
'test-slow-render'))
# Test cases that use the 360 video page
video_test_cases = [
# Test using the default, low resolution video
['canvasClickPresents=1'],
]
for url_parameters in video_test_cases:
self.AddStory(WebVrSamplePage(self, url_parameters, 'XX-360-video'))
| 39.492754
| 80
| 0.711927
|
144ea82b1e44b223ad52714bb717b95923eb1802
| 181
|
py
|
Python
|
workbench/expenses/apps.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 15
|
2020-09-02T22:17:34.000Z
|
2022-02-01T20:09:10.000Z
|
workbench/expenses/apps.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 18
|
2020-01-08T15:28:26.000Z
|
2022-02-28T02:46:41.000Z
|
workbench/expenses/apps.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 8
|
2020-09-29T08:00:24.000Z
|
2022-01-16T11:58:19.000Z
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class Config(AppConfig):
name = "workbench.expenses"
verbose_name = _("expenses")
| 22.625
| 54
| 0.762431
|
2e18e41f0b3b97a27c48f4a70a834b94b867381f
| 629
|
py
|
Python
|
proxyclient/hv/trace_gpio.py
|
phire/m1n1
|
41f0874f9999d2ac95ef5418dfae4fdd42cdc507
|
[
"MIT"
] | 1,604
|
2021-01-14T19:04:59.000Z
|
2022-03-31T18:34:16.000Z
|
proxyclient/hv/trace_gpio.py
|
stvhay/asahilinux-m1n1
|
bad788e67e9f4da4bfc805415c8dd5726decb7df
|
[
"MIT"
] | 105
|
2021-01-15T03:52:27.000Z
|
2022-03-30T22:16:52.000Z
|
proxyclient/hv/trace_gpio.py
|
stvhay/asahilinux-m1n1
|
bad788e67e9f4da4bfc805415c8dd5726decb7df
|
[
"MIT"
] | 96
|
2021-01-14T21:13:53.000Z
|
2022-03-31T12:14:14.000Z
|
# SPDX-License-Identifier: MIT
from m1n1.trace.gpio import GPIOTracer
#trace_device("/arm-io/gpio", True)
# trace gpio interrupts, useful to follow the cascaded interrupts
aic_phandle = getattr(hv.adt["/arm-io/aic"], "AAPL,phandle")
try:
node = hv.adt["/arm-io/gpio0"]
path = "/arm-io/gpio0"
except:
node = hv.adt["/arm-io/gpio"]
path = "/arm-io/gpio"
if getattr(node, "interrupt-parent") == aic_phandle:
for irq in getattr(node, "interrupts"):
hv.trace_irq(node.name, irq, 1, hv.IRQTRACE_IRQ)
GPIOTracer = GPIOTracer._reloadcls()
gpio_tracer = GPIOTracer(hv, path, verbose=0)
gpio_tracer.start()
| 27.347826
| 64
| 0.693164
|
bd3096bd22b278ec6095c72b869da29f87b6cefe
| 668
|
py
|
Python
|
backend/manage.py
|
dpouris/chore-battle
|
12284cc8d6566bbae368f8257f2a022d924deb71
|
[
"RSA-MD"
] | 1
|
2022-03-26T17:40:16.000Z
|
2022-03-26T17:40:16.000Z
|
backend/manage.py
|
dpouris/chore-battle
|
12284cc8d6566bbae368f8257f2a022d924deb71
|
[
"RSA-MD"
] | null | null | null |
backend/manage.py
|
dpouris/chore-battle
|
12284cc8d6566bbae368f8257f2a022d924deb71
|
[
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chore_battle.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.043478
| 76
| 0.681138
|
db742602a5cac6120aa55270d550a095efee8d81
| 22,148
|
py
|
Python
|
sonnet/python/modules/basic_rnn.py
|
ishandutta2007/sonnet
|
417a5bb73e579963728e3f9b40ad583ac014601a
|
[
"Apache-2.0"
] | 345
|
2017-08-23T13:48:50.000Z
|
2022-03-17T05:43:34.000Z
|
sonnet/python/modules/basic_rnn.py
|
ishandutta2007/sonnet
|
417a5bb73e579963728e3f9b40ad583ac014601a
|
[
"Apache-2.0"
] | 8
|
2017-09-30T15:01:23.000Z
|
2019-12-18T08:46:08.000Z
|
sonnet/python/modules/basic_rnn.py
|
ishandutta2007/sonnet
|
417a5bb73e579963728e3f9b40ad583ac014601a
|
[
"Apache-2.0"
] | 224
|
2017-08-31T01:10:55.000Z
|
2022-03-09T06:14:12.000Z
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic RNN Cores for TensorFlow snt.
This file contains the definitions of the simplest building blocks for Recurrent
Neural Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from sonnet.python.modules import basic
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest
def _get_flat_core_sizes(cores):
"""Obtains the list flattened output sizes of a list of cores.
Args:
cores: list of cores to get the shapes from.
Returns:
List of lists that, for each core, contains the list of its output
dimensions.
"""
core_sizes_lists = []
for core in cores:
flat_output_size = nest.flatten(core.output_size)
core_sizes_lists.append([tensor_shape.as_shape(size).as_list()
for size in flat_output_size])
return core_sizes_lists
def _get_shape_without_batch_dimension(tensor_nest):
"""Converts Tensor nest to a TensorShape nest, removing batch dimension."""
def _strip_batch_and_convert_to_shape(tensor):
return tensor[0].get_shape()
return nest.map_structure(_strip_batch_and_convert_to_shape, tensor_nest)
class VanillaRNN(rnn_core.RNNCore):
"""Basic fully connected vanilla RNN core."""
IN_TO_HIDDEN = "in_to_hidden"
HIDDEN_TO_HIDDEN = "hidden_to_hidden"
POSSIBLE_INITIALIZER_KEYS = {IN_TO_HIDDEN, HIDDEN_TO_HIDDEN}
def __init__(self, hidden_size, activation=tf.tanh, initializers=None,
partitioners=None, regularizers=None, name="vanilla_rnn"):
"""Construct a Basic RNN core.
Args:
hidden_size: hidden size dimensionality.
activation: activation function to use.
initializers: optional dict containing ops to initialize the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
partitioners: optional dict containing ops to partition the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
regularizers: optional dict containing ops to regularize the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
name: name of the module.
Raises:
KeyError: if `initializers` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
KeyError: if `partitioners` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
KeyError: if `regularizers` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(VanillaRNN, self).__init__(name=name)
self._hidden_size = hidden_size
self._activation = activation
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, input_, prev_state):
"""Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
self._in_to_hidden_linear = basic.Linear(
self._hidden_size, name="in_to_hidden",
initializers=self._initializers.get("in_to_hidden"),
partitioners=self._partitioners.get("in_to_hidden"),
regularizers=self._regularizers.get("in_to_hidden"))
self._hidden_to_hidden_linear = basic.Linear(
self._hidden_size, name="hidden_to_hidden",
initializers=self._initializers.get("hidden_to_hidden"),
partitioners=self._partitioners.get("hidden_to_hidden"),
regularizers=self._regularizers.get("hidden_to_hidden"))
in_to_hidden = self._in_to_hidden_linear(input_)
hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
output = self._activation(in_to_hidden + hidden_to_hidden)
# For VanillaRNN, the next state of the RNN is the same as the output
return output, output
@property
def in_to_hidden_linear(self):
self._ensure_is_connected()
return self._in_to_hidden_linear
@property
def hidden_to_hidden_linear(self):
self._ensure_is_connected()
return self._hidden_to_hidden_linear
@property
def in_to_hidden_variables(self):
self._ensure_is_connected()
return self._in_to_hidden_linear.get_variables()
@property
def hidden_to_hidden_variables(self):
self._ensure_is_connected()
return self._hidden_to_hidden_linear.get_variables()
@property
def state_size(self):
return tf.TensorShape([self._hidden_size])
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
class DeepRNN(rnn_core.RNNCore):
"""RNN core that passes data through a number of internal modules or ops.
This module is constructed by passing an iterable of externally constructed
modules or ops. The DeepRNN takes `(input, prev_state)` as input and passes
the input through each internal module in the order they were presented,
using elements from `prev_state` as necessary for internal recurrent cores.
The output is `(output, next_state)` in common with other RNN cores.
By default, skip connections from the input to all internal modules and from
each intermediate output to the final output are used.
E.g.:
```python
lstm1 = snt.LSTM(hidden_size=256)
lstm2 = snt.LSTM(hidden_size=256)
deep_rnn = snt.DeepRNN([lstm1, lstm2])
output, next_state = deep_rnn(input, prev_state)
```
The computation set up inside the DeepRNN has the same effect as:
```python
prev_state1, prev_state2 = prev_state
lstm1_output, next_state1 = lstm1(input, prev_state1)
  lstm2_output, next_state2 = lstm2(
tf.concat([input, lstm1_output], 1), prev_state2)
next_state = (next_state1, next_state2)
output = tf.concat([lstm1_output, lstm2_output], 1)
```
Every internal module receives the preceding module's output and the entire
core's input. The output is created by concatenating each internal module's
output. In the case of internal recurrent elements, corresponding elements
of the state are used such that `state[i]` is passed to the `i`'th internal
recurrent element. Note that the state of a `DeepRNN` is always a tuple, which
will contain the same number of elements as there are internal recurrent
cores. If no internal modules are recurrent, the state of the DeepRNN as a
whole is the empty tuple. Wrapping non-recurrent modules into a DeepRNN can
be useful to produce something API compatible with a "real" recurrent module,
simplifying code that handles the cores.
Without skip connections the previous example would become the following
(note the only difference is the addition of `skip_connections=False`):
```python
# ... declare other modules as above
deep_rnn = snt.DeepRNN([lin, tanh, lstm], skip_connections=False)
output, next_state = deep_rnn(input, prev_state)
```
which is equivalent to:
```python
lin_output = lin(input)
tanh_output = tanh(lin_output)
lstm_output, lstm_next_state = lstm(tanh_output, prev_state[0])
next_state = (lstm_next_state,)
output = lstm_output
```
Note: when using skip connections, all the cores should be recurrent.
"""
def __init__(self, cores, skip_connections=True,
concat_final_output_if_skip=True, name="deep_rnn"):
"""Construct a Deep RNN core.
Args:
cores: iterable of modules or ops.
skip_connections: a boolean that indicates whether to use skip
connections. This means that the input is fed to all the layers, after
being concatenated with the output of the previous layer. The output
of the module will be the concatenation of all the outputs of the
internal modules.
concat_final_output_if_skip: A boolean that indicates whether the outputs
of intermediate layers should be concatenated into the timestep-wise
output of the core. By default this is True. If this is set to False,
then the core output is that of the final layer, i.e. that of
`cores[-1]`.
name: name of the module.
Raises:
ValueError: if `cores` is not an iterable, or if `skip_connections` is
True and not all the modules are recurrent.
"""
super(DeepRNN, self).__init__(name=name)
if not isinstance(cores, collections.Iterable):
raise ValueError("Cores should be an iterable object.")
self._cores = tuple(cores)
self._skip_connections = skip_connections
self._concat_final_output_if_skip = concat_final_output_if_skip
self._is_recurrent_list = [isinstance(core, rnn_core.RNNCore)
for core in self._cores]
if self._skip_connections:
tf.logging.warning(
"The `skip_connections` argument will be deprecated. Please use "
"snt.SkipConnectionCore instead."
)
if not all(self._is_recurrent_list):
raise ValueError("skip_connections are enabled but not all cores are "
"`snt.RNNCore`s, which is not supported. The following"
" cores were specified: {}.".format(self._cores))
self._check_cores_output_sizes()
self._num_recurrent = sum(self._is_recurrent_list)
def _check_cores_output_sizes(self):
"""Checks the output_sizes of the cores of the DeepRNN module.
Raises:
ValueError: if the outputs of the cores cannot be concatenated along their
first dimension.
"""
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
first_core_list = core_sizes[0][1:]
for i, core_list in enumerate(core_sizes[1:]):
if core_list[1:] != first_core_list:
raise ValueError("The outputs of the provided cores are not able "
"to be concatenated along the first feature "
"dimension. Core 0 has size %s, whereas Core %d "
"has size %s" % (first_core_list, i, core_list))
def _build(self, inputs, prev_state):
"""Connects the DeepRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: a nested tuple of Tensors of arbitrary dimensionality, with at
least an initial batch dimension.
prev_state: a tuple of `prev_state`s that corresponds to the state
of each one of the cores of the `DeepCore`.
Returns:
output: a nested tuple of Tensors of arbitrary dimensionality, with at
least an initial batch dimension.
next_state: a tuple of `next_state`s that corresponds to the updated state
of each one of the cores of the `DeepCore`.
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations. This may happen if one connects a module any time after the
first time that does not have the configuration of skip connections as
the first time.
"""
current_input = inputs
next_states = []
outputs = []
recurrent_idx = 0
for i, core in enumerate(self._cores):
if self._skip_connections and i > 0:
flat_input = (nest.flatten(inputs), nest.flatten(current_input))
flat_input = [tf.concat(input_, 1) for input_ in zip(*flat_input)]
current_input = nest.pack_sequence_as(structure=inputs,
flat_sequence=flat_input)
# Determine if this core in the stack is recurrent or not and call
# accordingly.
if self._is_recurrent_list[i]:
current_input, next_state = core(current_input,
prev_state[recurrent_idx])
next_states.append(next_state)
recurrent_idx += 1
else:
current_input = core(current_input)
if self._skip_connections:
outputs.append(current_input)
if self._skip_connections and self._concat_final_output_if_skip:
flat_outputs = tuple(nest.flatten(output) for output in outputs)
flat_outputs = [tf.concat(output, 1) for output in zip(*flat_outputs)]
output = nest.pack_sequence_as(structure=outputs[0],
flat_sequence=flat_outputs)
else:
output = current_input
return output, tuple(next_states)
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state for a DeepRNN.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An initializer function or nested structure of
functions with same structure as the `state_size` property of the
core, to be used as initializers of the initial state variable.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
A tensor or nested tuple of tensors with same structure and shape as the
`state_size` property of the core.
Raises:
ValueError: if the number of passed initializers is not the same as the
number of recurrent cores.
"""
initial_state = []
if trainable_initializers is None:
trainable_initializers = [None] * self._num_recurrent
if trainable_regularizers is None:
trainable_regularizers = [None] * self._num_recurrent
num_initializers = len(trainable_initializers)
if num_initializers != self._num_recurrent:
raise ValueError("The number of initializers and recurrent cores should "
"be the same. Received %d initializers for %d specified "
"recurrent cores."
% (num_initializers, self._num_recurrent))
with tf.name_scope(self._initial_state_scope(name)):
recurrent_idx = 0
for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
if is_recurrent:
core_initial_state = core.initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers[recurrent_idx],
trainable_regularizers=trainable_regularizers[recurrent_idx])
initial_state.append(core_initial_state)
recurrent_idx += 1
return tuple(initial_state)
@property
def state_size(self):
sizes = []
for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
if is_recurrent:
sizes.append(core.state_size)
return tuple(sizes)
@property
def output_size(self):
if self._skip_connections and self._concat_final_output_if_skip:
output_size = []
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
added_core_size = core_sizes[0]
added_core_size[0] = sum([size[0] for size in core_sizes])
output_size.append(tf.TensorShape(added_core_size))
return nest.pack_sequence_as(structure=self._cores[0].output_size,
flat_sequence=output_size)
else:
# Assumes that an element of cores which does not have the output_size
# property does not affect the output shape. Then the 'last' core in the
# sequence with output_size information should be the output_size of the
# DeepRNN. This heuristic is error prone, but we would lose a lot of
# flexibility if we tried to enforce that the final core must have an
# output_size field (e.g. it would be impossible to add a TF nonlinearity
# as the final "core"), but we should at least print a warning if this
# is the case.
final_core = self._cores[-1]
if hasattr(final_core, "output_size"):
# This is definitely the correct value, so no warning needed.
return final_core.output_size
# If we have connected the module at least once, we can get the output
# size of whatever was actually produced. The indexing of [-1] gets us
# the most recent connection, and [0] gets us the first element of the
# output tuple as opposed to the recurrent state.
if self._connected_subgraphs:
last_connected_output_size = _get_shape_without_batch_dimension(
self._connected_subgraphs[-1].outputs[0])
tf.logging.warning(
"Final core does not contain .output_size, but the "
"DeepRNN has been connected into the graph, so inferred output "
"size as %s", last_connected_output_size)
return last_connected_output_size
# If all else fails, iterate backwards through cores and return the
# first one which has an output_size field. This can be incorrect in
# various ways, so warn loudly.
try:
guessed_output_size = next(core.output_size
for core in reversed(self._cores)
if hasattr(core, "output_size"))
except StopIteration:
raise ValueError("None of the 'cores' have output_size information.")
tf.logging.warning(
"Trying to infer output_size of DeepRNN, but the final core %s does "
"not have the .output_size field. The guessed output_size is %s "
"but this may not be correct. If you see shape errors following this "
"warning, you must change the cores used in the DeepRNN so that "
"the final core used has a correct .output_size property.",
final_core, guessed_output_size)
return guessed_output_size
class ModelRNN(rnn_core.RNNCore):
"""RNNCore that ignores input and uses a model to compute its next state."""
def __init__(self, model, name="model_rnn"):
"""Construct a Basic RNN core.
Args:
model: callable that computes the next state.
name: name of the module.
Raises:
TypeError: if model is not a callable object or if it is an RNNCore.
AttributeError: if model does not have an output_size attribute.
"""
super(ModelRNN, self).__init__(name=name)
if not callable(model):
raise TypeError("Model must be callable.")
if isinstance(model, rnn_core.RNNCore):
raise TypeError("Model should not be an RNNCore.")
try:
self._output_size = model.output_size
except AttributeError:
raise AttributeError("Model should have an output_size attribute.")
self._model = model
def _build(self, inputs, prev_state):
"""Connects the ModelRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor input to the ModelRNN (ignored).
prev_state: Tensor of size `model.output_size`.
Returns:
output: Tensor of size `model.output_size`.
next_state: Tensor of size `model.output_size`.
"""
next_state = self._model(prev_state)
# For ModelRNN, the next state of the RNN is the same as the output
return next_state, next_state
@property
def state_size(self):
return self._output_size
@property
def output_size(self):
return self._output_size
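# --- Editor's illustrative sketch (not part of the original Sonnet sources) ---
# Minimal TF1 graph-mode usage of one of the cores defined above, assuming the
# tf.placeholder / tf.nn.dynamic_rnn APIs implied by this file's imports; the
# sizes are arbitrary demo values and the demo only builds the graph.
if __name__ == "__main__":
  demo_inputs = tf.placeholder(tf.float32, shape=[None, 10, 8])  # [batch, time, features]
  demo_core = VanillaRNN(hidden_size=16)
  demo_state = demo_core.initial_state(batch_size=tf.shape(demo_inputs)[0])
  demo_outputs, demo_final_state = tf.nn.dynamic_rnn(
      demo_core, demo_inputs, initial_state=demo_state, dtype=tf.float32)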
| 40.489945
| 80
| 0.700966
|
56d13b6029458edef06857a73013fd6757dafe1c
| 532
|
py
|
Python
|
melodb/loggers/ConsoleLogger.py
|
omarboukhris/melodb
|
043907857cd7a73857d8d9b06be0a2282f740253
|
[
"BSL-1.0"
] | null | null | null |
melodb/loggers/ConsoleLogger.py
|
omarboukhris/melodb
|
043907857cd7a73857d8d9b06be0a2282f740253
|
[
"BSL-1.0"
] | null | null | null |
melodb/loggers/ConsoleLogger.py
|
omarboukhris/melodb
|
043907857cd7a73857d8d9b06be0a2282f740253
|
[
"BSL-1.0"
] | null | null | null |
from melodb.loggers.ILogger import ILogger
from datetime import datetime
class ConsoleLogger(ILogger):
def __init__(self, component: str):
super(ConsoleLogger, self).__init__(component)
def info(self, log_message: str):
print(
f"INFO [{datetime.now()}] [{self.component}]: {log_message}"
)
def warn(self, log_message: str):
print(
f"WARN [{datetime.now()}] [{self.component}]: {log_message}"
)
def error(self, log_message: str):
print(
f"ERROR [{datetime.now()}] [{self.component}]: {log_message}"
)
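# --- Editor's illustrative usage (a sketch; ILogger's constructor contract is
# assumed from the super().__init__(component) call above) ---
if __name__ == "__main__":
    log = ConsoleLogger("orderbook")
    log.info("connected to data feed")
    log.warn("reconnect attempt 2/3")
    log.error("giving up after 3 retries")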
| 23.130435
| 64
| 0.68797
|
4bc64c83cb67b748c8356c55f47d67b161788b67
| 465
|
py
|
Python
|
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
from flask import render_template
from ..app import app
@app.errorhandler(404)
def page_not_found(error):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('errors/500.html'), 500
@app.errorhandler(403)
def access_forbidden(error):
return render_template('errors/403.html'), 403
@app.errorhandler(410)
def page_gone(error):
return render_template('errors/410.html'), 410
| 20.217391
| 50
| 0.754839
|
a59846e40600c6672e45266592c70a13e5b9d24d
| 1,497
|
py
|
Python
|
script/migrations/0001_initial.py
|
satamame/pscweb2
|
f15f6e2594a7339e4e964f2cb4d7363743b8cbd6
|
[
"MIT"
] | null | null | null |
script/migrations/0001_initial.py
|
satamame/pscweb2
|
f15f6e2594a7339e4e964f2cb4d7363743b8cbd6
|
[
"MIT"
] | null | null | null |
script/migrations/0001_initial.py
|
satamame/pscweb2
|
f15f6e2594a7339e4e964f2cb4d7363743b8cbd6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.8 on 2019-12-29 04:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Script',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='題名')),
('author', models.CharField(blank=True, max_length=50, verbose_name='著者')),
('raw_data', models.TextField(blank=True, verbose_name='データ')),
('format', models.IntegerField(choices=[(1, 'Fountain JA')], default=1, verbose_name='フォーマット')),
('create_dt', models.DateTimeField(auto_now_add=True, verbose_name='作成日時')),
('modify_dt', models.DateTimeField(auto_now=True, verbose_name='変更日時')),
('public_level', models.IntegerField(choices=[(1, '公開しない'), (2, 'PSCWEB2 ユーザ')], default=1, verbose_name='公開レベル')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所有者')),
],
options={
'verbose_name': '台本',
'verbose_name_plural': '台本',
},
),
]
| 41.583333
| 139
| 0.607214
|
477ec0c0f1488de6dbcb27b0146cfbe0319b079c
| 4,158
|
py
|
Python
|
verify-certificates.py
|
nanobot248/ssltools
|
7a940ac201559386793996eacad32e8e6dc9405e
|
[
"MIT"
] | null | null | null |
verify-certificates.py
|
nanobot248/ssltools
|
7a940ac201559386793996eacad32e8e6dc9405e
|
[
"MIT"
] | null | null | null |
verify-certificates.py
|
nanobot248/ssltools
|
7a940ac201559386793996eacad32e8e6dc9405e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import OpenSSL.crypto as crypto
from ssltools.certificates import verify_certificate, find_certificates, load_certificate_store, certificate_to_dict
import json
from datetime import datetime
from ssltools.json import to_json
import argparse
from jsonpointer import resolve_pointer
from jsonpath_rw import jsonpath, parse
if __name__ == "__main__":
cli = argparse.ArgumentParser(description = "Verify the certificates provided in PEM format on STDIN.")
cli.add_argument("--summary", "-s", dest = "summary", action = "store_true",
help = "Only return a summary value 'Valid' or 'Invalid'.")
cli.add_argument("--only", "-o", dest = "only", choices = ["valid", "invalid"],
help = "Return an array of either only valid or invalid certificates.")
cli.add_argument("--json-pointer", dest = "json_pointer", type = str, nargs = 1,
help = "JSON pointer query string (RFC6901) to get a specific attribute from the result. " +
"This is not applied in --summary mode.")
cli.add_argument("--json-path", dest = "json_path", nargs = "+",
help = "JSON path (http://goessner.net/articles/JsonPath/) filter string " +
"to query a subset of the result data. Multiple queries can be specified that are executed in " +
"order on the result of the previous query. This parameter is not used if " +
"--summary mode is used.")
cli.add_argument("-u", "--unwrap", dest = "unwrap", action = "store_true",
help = "Unwrap transforms different data types into a simpler format. If a result is a simple string, " +
"or a datetime the quotes are removed. If the result is an X509 name, its parts are joined to a string " +
"in the way used by openssl (C=..., O=..., OU=..., CN=...). Unwrap has no effect on " +
"a --summary value.")
args = cli.parse_args()
cert = sys.stdin.read()
certs = find_certificates(cert)
in_certs = []
for cert in certs:
in_certs.append(crypto.load_certificate(crypto.FILETYPE_PEM, cert))
store = load_certificate_store()
good_certs = []
while len(in_certs) > 0:
good = []
for cert in in_certs:
if verify_certificate(cert, store):
good.append(cert)
if len(good) < 1: break
for cert in good:
try: store.add_cert(cert)
except: pass
in_certs.remove(cert)
good_certs.append(cert)
if args.summary:
only_valid = len(in_certs) < 1
result = ["Invalid", "Valid"][only_valid]
print result
sys.exit(0)
tmp_certs = []
for cert in good_certs:
cert = certificate_to_dict(cert)
cert["verificationValid"] = True
tmp_certs.append(cert)
good_certs = tmp_certs
tmp_certs = []
for cert in in_certs:
cert = certificate_to_dict(cert)
cert["verificationValid"] = False
tmp_certs.append(cert)
in_certs = tmp_certs
tmp_certs = None
out_certs = None
if args.only != None:
if args.only == "valid":
out_certs = good_certs
else:
out_certs = in_certs
else:
out_certs = []
for cert in in_certs: out_certs.append(cert)
for cert in good_certs: out_certs.append(cert)
if args.json_path != None and len(args.json_path) > 0:
for pathExpression in args.json_path:
expr = parse(pathExpression)
out_certs = [match.value for match in expr.find(out_certs)]
if args.json_pointer != None and len(args.json_pointer) > 0:
pointer = args.json_pointer[0]
out_certs = resolve_pointer(out_certs, pointer)
if args.unwrap and isinstance(out_certs, str):
jsonData = out_certs
    elif args.unwrap and isinstance(out_certs, datetime):
jsonData = out_certs.isoformat()
elif args.unwrap and isinstance(out_certs, dict):
jsonData = ""
for key in out_certs:
jsonData += key + "=" + out_certs[key] + ", "
if len(jsonData) > 0: jsonData = jsonData[0:-2]
else:
jsonData = to_json(out_certs, pretty = True)
print jsonData
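# --- Editor's illustrative invocation (a sketch using only the flags defined above) ---
# The script reads a PEM chain on STDIN; it is Python 2 era code (print statements).
#
#   cat chain.pem | python verify-certificates.py --summary
#   cat chain.pem | python verify-certificates.py --only valid --json-path '$[*].verificationValid'
#
# '$[*].verificationValid' is only an example JSONPath: the available keys depend on
# what certificate_to_dict() emits, plus the 'verificationValid' flag set above.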
| 38.146789
| 116
| 0.633237
|
e3da1d0e75cf32b6d5100db02234098dfa0fd253
| 163
|
py
|
Python
|
examples/docs_snippets/docs_snippets/guides/dagster/dagster_type_factories/schema_execution.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
examples/docs_snippets/docs_snippets/guides/dagster/dagster_type_factories/schema_execution.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets/docs_snippets/guides/dagster/dagster_type_factories/schema_execution.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2019-09-11T03:02:27.000Z
|
2019-09-11T03:02:27.000Z
|
from .schema import df, trips_schema
trips_schema.validate(df)
# => SchemaError: non-nullable series 'end_time' contains null values:
# => 22 NaT
# => 43 NaT
| 23.285714
| 70
| 0.711656
|
7ae193bf34a54e31588a5d9c60c7a4fdf2ec54e8
| 3,947
|
py
|
Python
|
sample/collect/service_imp/model/model_save.py
|
SelfDown/omnis-collect3
|
84f16ed108e4295719cf943b573aeb4ae3fe9c75
|
[
"MIT"
] | null | null | null |
sample/collect/service_imp/model/model_save.py
|
SelfDown/omnis-collect3
|
84f16ed108e4295719cf943b573aeb4ae3fe9c75
|
[
"MIT"
] | null | null | null |
sample/collect/service_imp/model/model_save.py
|
SelfDown/omnis-collect3
|
84f16ed108e4295719cf943b573aeb4ae3fe9c75
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time: 2021/7/14 16:32
@Author: zzhang zzhang@cenboomh.com
@File: ModelUpdate.py
@desc:
"""
from collect.collect_service import CollectService
from collect.utils.collect_utils import get_safe_data
class ModelSaveService(CollectService):
def __init__(self, op_user):
CollectService.__init__(self, op_user)
pass
def get_exclude_save_field_name(self):
return self.const["exclude_save_field_name"]
def get_exclude_save_field(self):
return get_safe_data(self.get_exclude_save_field_name(), self.template)
def handler_model_params(self, params):
return CollectService.result(self, params)
def validate_model(self, model_obj):
from django.core.exceptions import ValidationError
try:
model_obj.full_clean()
except ValidationError as e:
msg_list = []
md = e.message_dict
for field_key in md:
template_params = self.get_template_params()
param = get_safe_data(field_key, template_params)
name = field_key
detail = md[field_key]
if param:
n = get_safe_data("name", param)
if n:
name = n
msg = "字段 【{name}】错误: {detail}".format(name=name, detail=" ".join(detail))
msg_list.append(msg)
return self.fail(msg=",".join(msg_list))
return self.success(model_obj)
def result(self, params=None):
result = self.handler_model_params(params)
if self.finish or not self.is_success(result):
return result
import time
start = time.time()
        # Fetch the model object
model_obj_result = self.get_model_obj()
if not self.is_success(model_obj_result):
return model_obj_result
model_obj = self.get_data(model_obj_result)
        # Update the fields
model_obj_result = self.update_field(model_obj)
if not self.is_success(model_obj_result):
return model_obj_result
        # Validate the data
model_obj_result = self.validate_model(model_obj)
if not model_obj_result:
return model_obj_result
        # Save
fields = {}
exclude_save_field = self.get_exclude_save_field()
        if exclude_save_field:  # drop fields that must not be modified
update_fields = self.get_update_fields(model_obj)
update_fields = [i for i in update_fields if i not in exclude_save_field]
if len(update_fields) > 0:
fields["update_fields"] = update_fields
model_obj.save(**fields)
else:
model_obj.save()
param_result = self.get_params_result()
end = time.time()
if self.can_log():
service_name = get_safe_data(self.get_service_name(), params)
self.log("{service} 保存耗时 {spend}".format(service=service_name, spend=str(end - start)))
def getDict():
fields = []
for field in model_obj._meta.fields:
fields.append(field.name)
d = {}
def isAttrInstance(attr, clazz):
return isinstance(getattr(model_obj, attr), clazz)
import datetime
for attr in fields:
if isinstance(getattr(model_obj, attr), datetime.datetime):
d[attr] = getattr(model_obj, attr).strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(getattr(model_obj, attr), datetime.date):
d[attr] = getattr(model_obj, attr).strftime('%Y-%m-%d')
elif isAttrInstance(attr, int) or isAttrInstance(attr, float) \
or isAttrInstance(attr, str):
d[attr] = getattr(model_obj, attr)
# else:
# d[attr] = getattr(self, attr)
return d
return self.success(data=getDict(), msg="保存成功")
| 35.881818
| 99
| 0.587281
|
aaee56c85ac1ba40fa35d33cfa62e97af249a118
| 203
|
py
|
Python
|
shortcodes/templatetags/shortcodes_filters.py
|
ONWT/django-shortcodes
|
132082384902daa814a41695a679b5436ea14e57
|
[
"MIT"
] | null | null | null |
shortcodes/templatetags/shortcodes_filters.py
|
ONWT/django-shortcodes
|
132082384902daa814a41695a679b5436ea14e57
|
[
"MIT"
] | null | null | null |
shortcodes/templatetags/shortcodes_filters.py
|
ONWT/django-shortcodes
|
132082384902daa814a41695a679b5436ea14e57
|
[
"MIT"
] | null | null | null |
from shortcodes import parser
from django import template
register = template.Library()
def shortcodes_replace(value):
return parser.parse(value)
register.filter('shortcodes', shortcodes_replace)
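# --- Editor's illustrative template usage (a sketch; `post.body` is an arbitrary
# example variable) ---
# After loading this tag library in a Django template, the filter registered
# above routes text through shortcodes.parser.parse():
#
#   {% load shortcodes_filters %}
#   {{ post.body|shortcodes }}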
| 20.3
| 49
| 0.802956
|
3b225a65fbbadb352361ee40a5c3ac9fad1cb4f4
| 1,193
|
py
|
Python
|
Awards/urls.py
|
olesigilai/Awards
|
8404517249c34f65cd2ce49fc2b13654056047d0
|
[
"MIT",
"Unlicense"
] | null | null | null |
Awards/urls.py
|
olesigilai/Awards
|
8404517249c34f65cd2ce49fc2b13654056047d0
|
[
"MIT",
"Unlicense"
] | 1
|
2021-04-06T19:27:04.000Z
|
2021-04-06T19:27:04.000Z
|
Awards/urls.py
|
olesigilai/Awards
|
8404517249c34f65cd2ce49fc2b13654056047d0
|
[
"MIT",
"Unlicense"
] | null | null | null |
"""Awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
# from django.conf.urls import include,path,url
from django.urls import include,path
from django_registration.backends.one_step.views import RegistrationView
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/register/',
RegistrationView.as_view(success_url='/'),
name='django_registration_register'),
path('accounts/', include('django_registration.backends.one_step.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('', include ('user.urls')),
]
| 36.151515
| 77
| 0.70746
|
fc5394d88fb769d2d0b3af261ecb7a175871aad9
| 4,628
|
py
|
Python
|
tmp/quadrature.py
|
saustinp/3D-CG
|
8d3e161674273649af1f23b2a0e1d5100971477a
|
[
"MIT"
] | null | null | null |
tmp/quadrature.py
|
saustinp/3D-CG
|
8d3e161674273649af1f23b2a0e1d5100971477a
|
[
"MIT"
] | null | null | null |
tmp/quadrature.py
|
saustinp/3D-CG
|
8d3e161674273649af1f23b2a0e1d5100971477a
|
[
"MIT"
] | null | null | null |
import numpy as np
import logging
logger = logging.getLogger(__name__)
# Finding the sim root directory
import sys
from pathlib import Path
cwd = Path.cwd()
for dirname in tuple(cwd.parents):
if dirname.name == '3D-CG':
sim_root_dir = dirname
continue
sys.path.append(str(sim_root_dir.joinpath('util')))
from math_helper_fcns import inv
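# Editor's note (descriptive comment added for clarity): both routines below assemble
# a Gauss-quadrature approximation of the integral of a nodal field,
#     integral(f) ~= sum_g w_g * |det J(xi_g)| * f(xi_g),
# expressed in matrix form as dF = PHI @ W @ JAC_DET @ G_GQ, with PHI the shape
# functions evaluated at the Gauss points, W the quadrature weights, JAC_DET the
# mapping Jacobian determinants (surface element norms in the face case), and
# G_GQ = PHI.T @ field the field sampled at the Gauss points.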
def face_surface_integral(ho_pts, master, field, ndim):
if ndim == 2:
raise NotImplementedError
elif ndim == 3:
# First, prepare data structures to build PHI, DX, and DY, DETJ, and other matrices:
n_gqpts = master['gptsface'].shape[0]
# The following matrices are size (nplocal, npgauss), essentially restructuring the master.shap dataset
PHI = master['shapface'][:, :, 0].T
DPHI_DXI = master['shapface'][:, :, 1].T
DPHI_DETA = master['shapface'][:, :, 2].T
# GQ weights in matrix form
W = np.diag(master['gwface'])
# Why transpose again? And check the dimensions of g
G_GQ = PHI.T@field # For now, assume the neumann condition is uniform across the face. But be prepared to change that in the future.
J = np.zeros((n_gqpts, 2, 3))
J[:, 0, 0] = DPHI_DXI.T@ho_pts[:, 0] # DX_DXI
J[:, 0, 1] = DPHI_DXI.T@ho_pts[:, 1] # DY_DXI
J[:, 0, 2] = DPHI_DXI.T@ho_pts[:, 2] # DZ_DXI
J[:, 1, 0] = DPHI_DETA.T@ho_pts[:, 0] # DX_DETA
J[:, 1, 1] = DPHI_DETA.T@ho_pts[:, 1] # DY_DETA
J[:, 1, 2] = DPHI_DETA.T@ho_pts[:, 2] # DZ_DETA
# Determinants of Jacobians stored as a matrix, diagonal)
JAC_DET = np.zeros((n_gqpts))
for i in np.arange(n_gqpts):
JAC_DET[i] = np.linalg.norm(np.cross(J[i, 0, :], J[i, 1, :])) # Magic number 1/3 to correct scaling issue seen - not quite sure why here - but maybe the (1/2) and (1/3) should be combined to make (1/6) under the mapping from a cube to a tet volume? Not quite sure here.
JAC_DET = np.diag(JAC_DET)
# This is basically the same as in the volume integral case, except that the jacobian determinants represent the transformation from a square in the x-y plane to an arbitrarily oriented square in R^3
dF = PHI@W@JAC_DET@G_GQ # This is the contribution to the total integral from this particular volume element
# return np.sum(dF) # Returning as a scalar
        return dF # Returning the per-node contribution vector (not reduced to a scalar)
def elem_volume_integral(ho_pts, master, field, ndim):
if ndim == 2:
raise NotImplementedError
elif ndim == 3:
# First, prepare data structures to build PHI, DX, and DY, DETJ, and other matrices:
n_gqpts = master['gptsvol'].shape[0]
# The following matrices are size (nplocal, npgauss), essentially restructuring the master.shap dataset
PHI = master['shapvol'][:, :, 0].T
DPHI_DXI = master['shapvol'][:, :, 1].T
DPHI_DETA = master['shapvol'][:, :, 2].T
DPHI_DGAMMA = master['shapvol'][:, :, 3].T
# GQ weights in matrix form
W = np.diag(master['gwvol'])
# Why transpose again? And check the dimensions of g
G_GQ = PHI.T@field # For now, assume the neumann condition is uniform across the face. But be prepared to change that in the future.
J = np.zeros((n_gqpts, 3, 3))
J[:, 0, 0] = DPHI_DXI.T@ho_pts[:, 0] # DX_DXI
J[:, 0, 1] = DPHI_DXI.T@ho_pts[:, 1] # DY_DXI
J[:, 0, 2] = DPHI_DXI.T@ho_pts[:, 2] # DZ_DXI
J[:, 1, 0] = DPHI_DETA.T@ho_pts[:, 0] # DX_DETA
J[:, 1, 1] = DPHI_DETA.T@ho_pts[:, 1] # DY_DETA
J[:, 1, 2] = DPHI_DETA.T@ho_pts[:, 2] # DZ_DETA
J[:, 2, 0] = DPHI_DGAMMA.T@ho_pts[:, 0] # DX_DGAMMA
J[:, 2, 1] = DPHI_DGAMMA.T@ho_pts[:, 1] # DY_DGAMMA
J[:, 2, 2] = DPHI_DGAMMA.T@ho_pts[:, 2] # DZ_DGAMMA
# Determinants of Jacobians stored as a matrix, diagonal)
# __, JAC_DET = inv(J)
JAC_DET = np.zeros((n_gqpts, 1)); # Determinants of Jacobians stored as a matrix, diagonal)
for i in np.arange(n_gqpts):
JAC_DET[i] = np.linalg.det(J[i, :, :])
JAC_DET = np.diag(np.squeeze(JAC_DET))
# JAC_DET = np.diag(JAC_DET)
# This is basically the same as in the volume integral case, except that the jacobian determinants represent the transformation from a square in the x-y plane to an arbitrarily oriented square in R^3
dF = PHI@W@JAC_DET@G_GQ # This is the contribution to the total integral from this particular volume element
return np.sum(dF) # Returning as a scalar
| 44.932039
| 281
| 0.613872
|
1d8dad04a98076083cfc1f8881e1f7096f5ed0b3
| 1,260
|
py
|
Python
|
Josh/Clustering/FinalCluster.py
|
aco8ogren/Tentin-Quarantino
|
08b494f5deb2c33e3bb5981135c780b0a34d5557
|
[
"MIT"
] | null | null | null |
Josh/Clustering/FinalCluster.py
|
aco8ogren/Tentin-Quarantino
|
08b494f5deb2c33e3bb5981135c780b0a34d5557
|
[
"MIT"
] | null | null | null |
Josh/Clustering/FinalCluster.py
|
aco8ogren/Tentin-Quarantino
|
08b494f5deb2c33e3bb5981135c780b0a34d5557
|
[
"MIT"
] | null | null | null |
#%%
import pickle
import numpy as np
import matplotlib.pyplot as plt
import git
import sys
sys.path.append('Josh/Clustering')
from kMeansClustering import Kmeans
import time
import os
import pandas as pd
repo=git.Repo('.', search_parent_directories=True)
cwd=repo.working_dir
os.chdir(cwd)
File=open('Josh/Processing/Processed Data/GeoDict.pkl','rb')
GeoDict=pickle.load(File)
del GeoDict[0]
# vals=[val for val in GeoDict.values()]
# for i,val in enumerate(vals):
# if (np.isnan(val)).any():
# del vals[i]
GeoVals=np.array([[key]+[vall for vall in val] for key,val in GeoDict.items()])
vals=GeoVals[:,1:]
#%%
GeoDf=pd.DataFrame(GeoVals,columns=['fips','long','lat'])
K=75
clusters=Kmeans(K,vals)[0]
ClusterDf=pd.DataFrame(data={'fips':GeoVals[:,0],'cluster':clusters})
DF=GeoDf.merge(ClusterDf,how='left', on='fips')
cols=['r','b','g','k','y','c','m']
colors=np.array([None]*len(DF))
for i in range(K):
colors[DF['cluster'].values==i]=cols[i%len(cols)]
plt.figure()
plt.scatter(DF['long'],DF['lat'],color=colors)
plt.title('K-Means Clustering:\nUS Counties')
plt.xlabel('Longitude [°]')
plt.ylabel('Latitude [°]')
plt.show()
# plt.savefig('ClusterResults.png')
# DF.to_csv('Josh/Clustering/FinalClusters.csv',index=False)
# %%
| 25.2
| 79
| 0.693651
|
f98a750d4dde92aefbeacf3b37a43cfa5744e9cc
| 166
|
py
|
Python
|
blog/blog_app/forms.py
|
nigel-otieno/assessment_reinhardt
|
2748560424ecccea996bff3ef8063195b580477f
|
[
"BSD-3-Clause"
] | null | null | null |
blog/blog_app/forms.py
|
nigel-otieno/assessment_reinhardt
|
2748560424ecccea996bff3ef8063195b580477f
|
[
"BSD-3-Clause"
] | null | null | null |
blog/blog_app/forms.py
|
nigel-otieno/assessment_reinhardt
|
2748560424ecccea996bff3ef8063195b580477f
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['name', 'body']
| 16.6
| 35
| 0.656627
|
df2f76fadbcf08962ecd1f8ccddbe69fe8658705
| 799
|
py
|
Python
|
Lib/corpuscrawler/crawl_kup.py
|
cash/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 95
|
2019-06-13T23:34:21.000Z
|
2022-03-12T05:22:49.000Z
|
Lib/corpuscrawler/crawl_kup.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 31
|
2019-06-02T18:56:53.000Z
|
2021-08-10T20:16:02.000Z
|
Lib/corpuscrawler/crawl_kup.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 35
|
2019-06-18T08:26:24.000Z
|
2022-01-11T13:59:40.000Z
|
# coding: utf-8
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
def crawl(crawler):
out = crawler.get_output(language='kup')
crawler.crawl_pngscriptures_org(out, language='kup')
| 34.73913
| 74
| 0.764706
|
472ba03d8d98666bce6f74cc0195d9bf067b0e19
| 205
|
py
|
Python
|
socialregistration/contrib/instagram/templatetags/instagram.py
|
nypublicradio/legacy-publisher-django-socialregistration
|
181f75d152f553f77fa899dac895c4276108204f
|
[
"MIT"
] | 63
|
2015-01-27T16:52:03.000Z
|
2021-08-29T04:23:51.000Z
|
socialregistration/contrib/instagram/templatetags/instagram.py
|
jairtrejo/django-socialregistration
|
c52b1db00bb7cfed2e0a44e587c3da6d2f5499c4
|
[
"MIT"
] | 3
|
2016-05-26T07:46:53.000Z
|
2022-02-16T15:25:16.000Z
|
socialregistration/contrib/instagram/templatetags/instagram.py
|
jairtrejo/django-socialregistration
|
c52b1db00bb7cfed2e0a44e587c3da6d2f5499c4
|
[
"MIT"
] | 23
|
2015-02-02T13:33:46.000Z
|
2020-10-25T20:02:53.000Z
|
from django import template
from socialregistration.templatetags import button
register = template.Library()
register.tag('instagram_button', button('socialregistration/instagram/instagram_button.html'))
| 34.166667
| 94
| 0.843902
|
9c01b22bd1034986a6c9ec40cdb4d111c15ace27
| 459
|
py
|
Python
|
Compilation/liec_seance-e_files/jeu.py
|
bk211/fac
|
9d74875561f60978d2c39af3d5e97605d21c0cc9
|
[
"MIT"
] | 1
|
2018-11-21T10:56:09.000Z
|
2018-11-21T10:56:09.000Z
|
Compilation/liec_seance-e_files/jeu.py
|
bk211/fac
|
9d74875561f60978d2c39af3d5e97605d21c0cc9
|
[
"MIT"
] | null | null | null |
Compilation/liec_seance-e_files/jeu.py
|
bk211/fac
|
9d74875561f60978d2c39af3d5e97605d21c0cc9
|
[
"MIT"
] | 1
|
2021-12-04T20:31:36.000Z
|
2021-12-04T20:31:36.000Z
|
prng_state = 42
def prng (sup):
global prng_state
prng_state = (prng_state * 1664525 + 1013904223) % 2**32
return prng_state % sup
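# Editor's note: prng() is a linear congruential generator with the classic
# Numerical Recipes constants, i.e. state <- (1664525 * state + 1013904223) mod 2**32,
# then reduced modulo `sup` to give a draw in [0, sup).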
def jeu (max):
print('Le nombre est entre 0 et ' + str(max))
n = prng(max)
guess = -1
while guess != n:
guess = int(input('Entrez un nombre: '))
if guess > n:
print("Trop grand.")
elif guess < n:
print("Trop petit.")
print("Bravo :).")
jeu(300)
| 21.857143
| 60
| 0.544662
|
efa4a5907ca523b4fb6f6685ee55fb131fbaefd5
| 10,527
|
py
|
Python
|
test/smoothness_tests.py
|
Zhi-ChaoZhao/NRSurMemory_7qd4
|
0f49530e7602f136f99879fbb4c9bb6e2f48cdc0
|
[
"MIT"
] | null | null | null |
test/smoothness_tests.py
|
Zhi-ChaoZhao/NRSurMemory_7qd4
|
0f49530e7602f136f99879fbb4c9bb6e2f48cdc0
|
[
"MIT"
] | null | null | null |
test/smoothness_tests.py
|
Zhi-ChaoZhao/NRSurMemory_7qd4
|
0f49530e7602f136f99879fbb4c9bb6e2f48cdc0
|
[
"MIT"
] | 1
|
2021-01-14T06:52:25.000Z
|
2021-01-14T06:52:25.000Z
|
import numpy as np
import h5py
import argparse
import os
import time
import matplotlib.pyplot as P
import surfinBH
# Number of data points to generate for each 1d plot
NUM_PARAMS = 50
# Number of 1d plots to generate per fit
NUM_TESTS = 10
# plot settings
marker_size=100
marker_size_star=9
label_fontsize = 14
title_fontsize = 16
ticks_fontsize = 16
line_width = 1.5
legend_size = 12
# --------------------------------------------------------------------------
def test_smoothness_wrapper(ax_pair, x_list, function, ylabel, \
params_list, label, y_index=None):
""" Wrapper to make plots of fit errors along different 1d directions.
x_list is an array of param values along the 1d direction.
"""
# Get list of q, chiA, and chiB
if aligned_spin_only:
q_list, chiAz_list, chiBz_list = params_list
chiA_list = [[0,0,tmp] for tmp in chiAz_list]
chiB_list = [[0,0,tmp] for tmp in chiBz_list]
else:
q_list, chiA_mag_list, chiA_th_list, chiA_ph_list, \
chiB_mag_list, chiB_th_list, chiB_ph_list = params_list
chiA_list = np.array(\
[chiA_mag_list*np.sin(chiA_th_list)*np.cos(chiA_ph_list),
chiA_mag_list*np.sin(chiA_th_list)*np.sin(chiA_ph_list),
chiA_mag_list*np.cos(chiA_th_list)]\
).T
chiB_list = np.array(\
[chiB_mag_list*np.sin(chiB_th_list)*np.cos(chiB_ph_list),
chiB_mag_list*np.sin(chiB_th_list)*np.sin(chiB_ph_list),
chiB_mag_list*np.cos(chiB_th_list)]\
).T
# Plot function and error estimates in a row of subplots
y_list = []
y_err_list = []
for i in range(len(q_list)):
y, y_err = function(q_list[i], chiA_list[i], chiB_list[i])
if y_index is None:
y_list.append(y)
y_err_list.append(y_err)
elif type(y_index) == int:
y_list.append(y[y_index])
y_err_list.append(y_err[y_index])
elif y_index == 'magnitude':
y_list.append(np.sqrt(np.sum(y**2)))
y_err_list.append(np.sqrt(np.sum(y_err**2)))
ax_pair[0].plot(x_list, y_list, label=label)
ax_pair[1].semilogy(x_list, y_err_list, label=label)
ax_pair[0].set_ylabel('%s'%ylabel, fontsize=label_fontsize)
ax_pair[1].set_ylabel('$\Delta$%s'%ylabel, fontsize=label_fontsize)
# --------------------------------------------------------------------------
def generate_params_along_line_aligned(x_param):
""" Generates params along a 1d line for aligned-spin params.
The given x_param is varied but all other params are kept const at
some randomly chosen values.
For each of the aligned-spin params:
if x_param is this particular param, returns a uniform list of
values in the range of this param. Also saves this list as
x_list.
else, returns a list, each element of which is the same, a
randomly chosen value in the range of this param.
Also returns a label with all the fixed params.
"""
label = ''
if x_param == 'q':
q_list = np.linspace(1, param_lims['q'], NUM_PARAMS)
x_list = q_list
else:
q = np.random.uniform(1, param_lims['q'])
q_list = [q for i in range(NUM_PARAMS)]
label += '$q=%.2f$ '%q
if x_param == 'chi1':
chiAz_list = np.linspace(-param_lims['chiAmag'], \
param_lims['chiAmag'], NUM_PARAMS)
x_list = chiAz_list
else:
chiAz = np.random.uniform(-param_lims['chiAmag'], param_lims['chiAmag'])
chiAz_list = [chiAz for i in range(NUM_PARAMS)]
label += '$\chi_{1z}=%.2f$ '%chiAz
if x_param == 'chi2':
chiBz_list = np.linspace(-param_lims['chiBmag'], \
param_lims['chiBmag'], NUM_PARAMS)
x_list = chiBz_list
else:
chiBz = np.random.uniform(-param_lims['chiBmag'], param_lims['chiBmag'])
chiBz_list = [chiBz for i in range(NUM_PARAMS)]
label += '$\chi_{2z}=%.2f$ '%chiBz
params_list = [q_list, chiAz_list, chiBz_list]
return x_list, label, params_list
# --------------------------------------------------------------------------
def generate_params_along_line(x_param):
""" Generates params along a 1d line for 7d params.
The given x_param is varied but all other params are kept const at
some randomly chosen values.
For each of the 7d params:
if x_param is this particular param, returns a uniform list of
values in the range of this param. Also saves this list as
x_list.
else, returns a list, each element of which is the same, a
randomly chosen value in the range of this param.
Also returns a label with all the fixed params.
"""
label = ''
if x_param == 'q':
q_list = np.linspace(1, param_lims['q'], NUM_PARAMS)
x_list = q_list
else:
q = np.random.uniform(1, param_lims['q'])
q_list = [q for i in range(NUM_PARAMS)]
label += '$q=%.2f$ '%q
if x_param == 'chi1':
chiA_mag_list = np.linspace(0, param_lims['chiAmag'], NUM_PARAMS)
x_list = chiA_mag_list
else:
chiA_mag = np.random.uniform(0, param_lims['chiAmag'])
chiA_mag_list = [chiA_mag for i in range(NUM_PARAMS)]
label += '$\chi_1=%.2f$ '%chiA_mag
if x_param == 'chi1_th':
chiA_th_list = np.linspace(0, np.pi, NUM_PARAMS)
x_list = chiA_th_list
else:
chiA_th = np.random.uniform(0, np.pi)
chiA_th_list = [chiA_th for i in range(NUM_PARAMS)]
label += '$\chi_{1\\theta}=%.2f$ '%chiA_th
if x_param == 'chi1_ph':
chiA_ph_list = np.linspace(0, 2*np.pi, NUM_PARAMS)
x_list = chiA_ph_list
else:
chiA_ph = np.random.uniform(0, 2*np.pi)
chiA_ph_list = [chiA_ph for i in range(NUM_PARAMS)]
label += '$\chi_{1\\phi}=%.2f$ '%chiA_ph
if x_param == 'chi2':
chiB_mag_list = np.linspace(0, param_lims['chiBmag'], NUM_PARAMS)
x_list = chiB_mag_list
else:
chiB_mag = np.random.uniform(0, param_lims['chiBmag'])
chiB_mag_list = [chiB_mag for i in range(NUM_PARAMS)]
label += '$\chi_2=%.2f$ '%chiB_mag
if x_param == 'chi2_th':
chiB_th_list = np.linspace(0, np.pi, NUM_PARAMS)
x_list = chiB_th_list
else:
chiB_th = np.random.uniform(0, np.pi)
chiB_th_list = [chiB_th for i in range(NUM_PARAMS)]
label += '$\chi_{2\\theta}=%.2f$ '%chiB_th
if x_param == 'chi2_ph':
chiB_ph_list = np.linspace(0, 2*np.pi, NUM_PARAMS)
x_list = chiB_ph_list
else:
chiB_ph = np.random.uniform(0, 2*np.pi)
chiB_ph_list = [chiB_ph for i in range(NUM_PARAMS)]
label += '$\chi_{2\\phi}=%.2f$ '%chiB_ph
params_list = [q_list, chiA_mag_list, chiA_th_list, chiA_ph_list, \
chiB_mag_list, chiB_th_list, chiB_ph_list]
return x_list, label, params_list
# --------------------------------------------------------------------------
def test_smoothness(x_param, x_param_label):
""" Tests smoothness in the direction of x_param.
    Does NUM_TESTS tests; for each test the rest of the
params are fixed at some randomly chosen values.
"""
if aligned_spin_only:
# Don't need spin direction plots for aligned-spin fits
if x_param[-3:] in ['_th', '_ph']:
return
fig, axarr = P.subplots(4,2,figsize=(10,10))
else:
fig, axarr = P.subplots(7,2,figsize=(10,15))
P.subplots_adjust(hspace=0.25, wspace=0.35)
axarr = axarr.reshape(-1, order='C')
comp_labels = ['x', 'y', 'z']
for i in range(NUM_TESTS):
# Get parameters along a 1d line, where x_param is varied but
# all other params are kept const at some randomly chosen values
if aligned_spin_only:
x_list, label, params_list \
= generate_params_along_line_aligned(x_param)
else:
x_list, label, params_list = generate_params_along_line(x_param)
# final mass plots along the 1d line
test_smoothness_wrapper(axarr[:2], x_list, fit.mf, '$m$', params_list, \
label)
# final spin, but plot only z values for aligned-spins
chi_idx = 0
for idx in range(3):
if (idx == 2) or (not aligned_spin_only):
test_smoothness_wrapper(\
axarr[2+2*chi_idx:4+2*chi_idx], x_list, fit.chif, \
'$\chi_{%s}$'%comp_labels[idx], params_list, label, \
y_index=idx)
chi_idx += 1
# final kick, but plot only x,y values for aligned-spins
vel_idx = 0
for idx in range(3):
if (idx in [0,1]) or (not aligned_spin_only):
test_smoothness_wrapper(\
axarr[2+2*chi_idx+2*vel_idx:4+2*chi_idx+2*vel_idx], \
x_list, fit.vf, '$v_{%s}$'%comp_labels[idx], \
params_list, label, y_index=idx)
vel_idx += 1
axarr[-1].set_xlabel(x_param_label, fontsize=label_fontsize)
axarr[-2].set_xlabel(x_param_label, fontsize=label_fontsize)
axarr[1].legend(loc=(1.1,-1))
P.savefig('%s/%s_smoothness_1d_%s.png'%(outdir, args.name, x_param), \
bbox_inches='tight')
P.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tests smoothness of fit ' \
'along different 1d directions', \
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--name", "-n", type=str, required=True, \
help="Fit name without the surfinBH prefix. Eg. 7dq2.")
args = parser.parse_args()
# Load fit
fit_name = args.name
fit = surfinBH.LoadFits(fit_name)
# get allowed range of params
param_lims = fit.hard_param_lims
# get bool for whether aligned_spin_only
aligned_spin_only = fit.aligned_spin_only
outdir = 'smoothness_tests'
os.system('mkdir -p %s'%outdir)
# test smoothness along different 1d directions
test_smoothness('q', '$q$')
test_smoothness('chi1', '$\chi_{1}$')
test_smoothness('chi1_th', '$\chi_{1\\theta}$')
test_smoothness('chi1_ph', '$\chi_{1\\phi}$')
test_smoothness('chi2', '$\chi_{2}$')
test_smoothness('chi2_th', '$\chi_{2\\theta}$')
test_smoothness('chi2_ph', '$\chi_{2\\phi}$')
| 35.684746
| 80
| 0.596181
|
22850b56d63e294a5c6e53cea630fe67e6f15fb6
| 18,045
|
py
|
Python
|
etc/dbus-serialbattery/test_max17853.py
|
trixing/dbus-serialbattery
|
4bcbb20b8c27a01426dd41cc8688d11be1bad8ec
|
[
"MIT"
] | 87
|
2020-09-17T21:10:04.000Z
|
2022-03-30T08:04:20.000Z
|
etc/dbus-serialbattery/test_max17853.py
|
trixing/dbus-serialbattery
|
4bcbb20b8c27a01426dd41cc8688d11be1bad8ec
|
[
"MIT"
] | 74
|
2021-02-04T15:14:42.000Z
|
2022-03-31T22:35:45.000Z
|
etc/dbus-serialbattery/test_max17853.py
|
trixing/dbus-serialbattery
|
4bcbb20b8c27a01426dd41cc8688d11be1bad8ec
|
[
"MIT"
] | 28
|
2021-02-10T20:46:45.000Z
|
2022-03-28T10:04:05.000Z
|
import spidev
import time
import math
from gpiozero import LED
def init_spi():
global spi, Q_time ,Q_Batt,Q_B_chg,kWh_dis,kWh_chg,cum_bp_kwh_in,\
cum_bp_kwh_out,Q_B_dis,Q_nom,SOH ,R_shunt,Vt_ref,V_bat_Sum,Ai,Ai_offs,\
Tj,Tbat,bal_stat,bal_stat2,p_genrun,p_charging ,p_loadshed,Fan_run_b, V_Cells,\
Ah_b_max, Ah_b_min,T_Cells,err_no, Q_Cycles,bal_count,chg_out,load_out,Genrun,Fan_run
# temp home for BMS constants
err_no = 0
Q_time = 0
Q_Batt = 1.8*60
Q_B_chg = 0
Q_Cycles = 0
kWh_dis = 0
kWh_chg = 0
cum_bp_kwh_in=0
cum_bp_kwh_out=0
Q_B_dis = 0
Q_nom = 3.6*36
SOH = 1
R_shunt = 0.025
V_Cells = [0]*8
T_Cells = [11]*8
Vt_ref = 3.299
V_bat_Sum = 25
Ai = 0
Ai_offs = 0.6
Ah_b_max = 0
Ah_b_min = 300
Tj = 25
Tbat = 25
bal_stat = 0
bal_stat2 = 0
bal_count = [0]*8
p_genrun = False
p_charging = True
p_loadshed = False
Fan_run_b = False
chg_out = LED(2)
load_out =LED(3)
Genrun = LED(4)
Fan_run = LED(17)
#spi = spidev.SpiDev()
#spi.open(0,0)
#spi.max_speed_hz = 500000
#spi.mode = 0
spi = None
return (spi)
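# Appears to compute the 3-bit CRC (polynomial 0x0B) that protects MAX17853
# SPI frames: the input word is left-aligned and returned with the CRC
# remainder appended in the low bits.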
def CrcA_MAX17( InputWord,WORD_LEN):
CRC_LEN =3
CRC_POLY =0x0B
CRC_SEED =0x000
CRCMask =(CRC_POLY<<(WORD_LEN-1))
LeftAlignedWord = InputWord<<CRC_LEN # /* Clear the CRC bit in the data frame*/
TestBitMask = 1 << ( WORD_LEN +2)
BitCount = ( WORD_LEN )
while (0 != BitCount):
BitCount -=1
if (0 != (LeftAlignedWord & TestBitMask)): # is and
LeftAlignedWord ^= CRCMask # is xor
CRCMask >>= 1
TestBitMask >>= 1
    return (LeftAlignedWord) # returns word with CRC appended; crc test.
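# Builds a MAX17853 SPI frame (R/W bit, register address, 16-bit payload plus
# the CRC-3 fields); in this test version the actual spidev transfer is stubbed
# out and the request is simply echoed back.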
def spi_xfer_MAX17(RW,Adr,xdata):
global spi
#*********************
# Python 2.7 can't cope with 32 bit numbers
#****************************
print("SPI:",RW,"{:02x}".format(Adr),"{:04x}".format(xdata))
return(0,0,Adr,xdata,0,0)
txdata = [0,0,0,0]
rxdata = [0,0,0,0]
tdwd = RW<<8^Adr
crca = CrcA_MAX17(tdwd,9)
crcb = CrcA_MAX17(xdata,16)
txword1 = 0^RW<<15
txword2 = 0^RW<<3
txword1 ^= Adr<<7
txword1 ^= crca<<4
txword1 ^= xdata&0xf000>>12
txword2 ^= xdata&0xfff<<4
txword2 ^= crcb
txdata[0] =0^RW<<7^Adr>>1 #(txword1)>>8
fadr = Adr&1
gadr = fadr<<7
txdata[1] =gadr^crca<<4^xdata>>12 #(txword1&0x00ff)
txdata[2] =(xdata>>4)&0xff #(txword2)>>8
txdata[3] =0^(xdata<<4)&0xff^RW<<3^crcb&0x7 #(txword2&0x00ff)
rxdata = spi.xfer(txdata) #
flags = rxdata[0]
crcs = flags&0x07
flags = flags>>3
if RW == 0:
radr = rxdata[1]
rdat = rxdata[2]<<8^rxdata[3]
rcrc = 0
rxok = rxdata[0]>>3&1 #crc check n-1
else:
radr = Adr
rdat = 0^((rxdata[1]&0x0f)<<16^rxdata[2]<<8^rxdata[3])>>4
rcrc = rxdata[3]&0x07
rxok = (rxdata[3]>>3)&0x01
time.sleep(.01)
return(flags,crcs,radr,rdat,rcrc,rxok)
def init_max(self):
    #*************************************
# need to pick up cell min and max to set cell voltage
# thresholds et al.
    #********************************************
init_spi()
time.sleep(0.1)
for i in range(1,7):
spi_xfer_MAX17(0,i,0x00) # clear por
spi_xfer_MAX17(0,0x14,0x02) # set spi int on AL out
spi_xfer_MAX17(0,0x15,0x04) #disable spi to
spi_xfer_MAX17(0,0x16,0x00) #enable gpio anlg in
t_cell = self.cell_count+1
tc = 0x2000
tc = tc | t_cell<<8 |t_cell<<4 | t_cell
spi_xfer_MAX17(0,0x18,tc) # top cell selection
spi_xfer_MAX17(0,0x19,0x3faf) #IRQ enable
ov = 0x4000
for i in range(1,t_cell):
ov |= 1<<i
spi_xfer_MAX17(0,0x1a,ov) # Over voltage enable
spi_xfer_MAX17(0,0x1b,ov) # Under voltage enable
spi_xfer_MAX17(0,0x1c,0xf) # Aux Over voltage enable 0-5
spi_xfer_MAX17(0,0x1d,0xf) # Aux Under voltage enable 0-5
ovc = int((self.V_C_max-0.1)/0.000305) # V_Cell max - 100mV
spi_xfer_MAX17(0,0x1f,ovc<<2) # over voltage clear thr 3.5V/.305mV <<2
ovs = int((self.V_C_max)/0.000305) # V_Cell max
spi_xfer_MAX17(0,0x20,ovs<<2) # over voltage set thr 3.6V/.305mV <<2
uvc = int((self.V_C_min+0.1)/0.000305) # V_Cell min - 100mV
spi_xfer_MAX17(0,0x21,uvc<<2) # under voltage clear thr 2.6V/.305mV <<2
uvs = int((self.V_C_min)/0.000305) # V_Cell min
spi_xfer_MAX17(0,0x22,uvs<<2) # under voltage set thr 2.5V/.305mV <<2
spi_xfer_MAX17(0,0x23,0x514) # cell mismatch set thr 0.1V/.305mV <<2
bovc = int((self.max_battery_voltage-0.25)/0.001934) #max battery volt - 0.25
spi_xfer_MAX17(0,0x28,bovc<<2) # block ov clear thr 1.934/0.205mV <<2
bovs = int((self.max_battery_voltage)/0.001934) #max battery volt
spi_xfer_MAX17(0,0x29,bovs<<2) # block ov set thr 1.984/0.201mV <<2
buvc = int((self.min_battery_voltage+0.25)/0.001934) #max battery volt + 0.25
spi_xfer_MAX17(0,0x2a,buvc<<2) # block uv cl thr 0.9907/0.201mV <<2
buvs = int((self.min_battery_voltage)/0.001934) #max battery volt
spi_xfer_MAX17(0,0x2b,buvs<<2) # block uv set thr 0.9407/0.201mV <<2
tovc = xtemp(self.T_C_min+5) # Aux under temp clear T cell min + 5c - Neg temp coeff!!
spi_xfer_MAX17(0,0x30,tovc<<2) # Aux undertemp clear thr V/3.967mV <<2
tovs = xtemp(self.T_C_min) # Aux under temp set T cell min
spi_xfer_MAX17(0,0x31,tovs) # Aux under temp set thr V/3.967mV <<2
tuvc = xtemp(self.T_C_max-5) # Aux over temp clear T cell max - 5c - Neg temp coeff!!
spi_xfer_MAX17(0,0x32,tuvc<<2) # Aux uv cl thr V/3.967mV <<2
tuvs = xtemp(self.T_C_max) # Aux over temp set T cell max - Neg temp coeff!!
spi_xfer_MAX17(0,0x33,tuvs<<2) # Aux uv set thr 20.8V/3.967mV <<2
spi_xfer_MAX17(0,0x5f,0x01) # ADC Polarity
spi_xfer_MAX17(0,0x62,0x4800) # ADCQ CFG
spi_xfer_MAX17(0,0x63,0x303) # BALSWDLY 3 x 96uS
cms = tc+1
spi_xfer_MAX17(0,0x64,cms) # cell measure enable
spi_xfer_MAX17(0,0x65,0x803F) # filter init, AUX meas enable
spi_xfer_MAX17(0,0x66,0xe21) # configure and init scan
spi_xfer_MAX17(0,0x80,0x00) #reset Bal CTRL
spi_xfer_MAX17(0,0x6f,0x1fe)
spi_xfer_MAX17(0,0x7e,0x01) #set bal uv thr = mincell
spi_xfer_MAX17(0,0x6b,1) # set die temp diag 1.
return()
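# Presumably the inverse of gpio_decode(): converts a temperature threshold in
# degrees C into the 14-bit thermistor ADC code (NTC curve, so hotter gives a
# smaller code), for programming the over/under-temperature registers.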
def xtemp(temp):
t = temp+12.74
s = math.exp(0.01988*t)
r = int(0x3fff/s)
return(r)
def vblk_dec(xdata,ref,adr):
global V_bat_Sum,VBS_max,VBS_min,min_rst_en,Q_Batt
vblock = xdata*ref
if adr == 22:
V_bat_Sum = vblock
return(vblock)
def stat_scan(self):
for i in range(2,0x17): # Read Status
f= spi_xfer_MAX17(1,i,0x0)
if i == 2:
st_wd1 = f[3]
if i ==3:
st_wd2 = f[3]
if i == 5:
fema1 = f[3]
for i in range (2,7): #Write stat 1:3, Fema to clear
f = spi_xfer_MAX17(0,i,0)
en = err_dec(st_wd1,st_wd2,fema1,self)
#print("stat",en)
return(en)
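# Decodes the MAX17853 status words and FMEA register into an error number and
# message, and mirrors the relevant bits onto the battery's protection flags.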
def err_dec(st_wd1,st_wd2,fema1,self):
global err_no, err_msg
if st_wd1 & 0x04 > 0:
err_no = 11
err_msg = "Bal Error?"
if st_wd1 & 0x8 > 0:
err_no = 10
err_msg = "Cal Error"
if st_wd1 & 0x10 > 0 and st_wd2 & 0xd0 > 0:
err_no = 9
err_msg = "SPI Error"
if st_wd1 & 0x80 > 0:
err_no = 8
err_msg = "Battery Over Temp"
self.protection.temp_high_charge = True
else:
self.protection.temp_high_charge = False
if st_wd1 & 0x100 > 0:
err_no = 7
err_msg = "Battery Under Temp"
        self.protection.temp_low_charge = True
else:
self.protection.temp_low_charge = False
if st_wd1 & 0x200 > 0:
err_no = 6
err_msg = "Battery Undervoltage"
self.protection.voltage_low = True
else:
self.protection.voltage_low = False
if st_wd1 & 0x400 > 0:
err_no = 5
err_msg = "Battery Overvoltage"
self.protection.voltage_high = True
else:
self.protection.voltage_high = False
if st_wd1 & 0x800 > 0:
err_no = 4
err_msg = "Cell Undervoltage"
self.protection.voltage_low = True
else:
self.protection.voltage_low = False
if st_wd1 & 0x1000 > 0:
err_no = 3 #overvoltage
err_msg = "Cell Overvoltage"
self.protection.voltage_high = True
else:
self.protection.voltage_high = False
if st_wd1 & 0x2000 > 0:
err_no = 2 #cell mismatch Dv too high
err_msg = "Cell voltage mismatch"
self.protection.cell_imbalance = True
else:
self.protection.cell_imbalance = False
if st_wd1 & 0x4000 > 0:
err_no = 1 #POR
err_msg = "POR"
if st_wd2 & 0x40 > 0:
err_no = 13
err_msg += " SPI CLK"
if st_wd2 & 0x20 > 0:
err_no = 14
err_msg += " 16MHz CLK"
if st_wd2 & 0x10 > 0:
err_no = 15
err_msg += " SPI INT BUS FLT"
if fema1 &0x08 >0:
err_no = 16
#print(315)
err_msg += " HV_UV"
if fema1 &0x04 >0:
err_no = 17
#print(319)
err_msg += " HV_DR"
if fema1 &0x70 >0:
err_no = 18
err_msg += " gnd flt"
if st_wd1 ==0 and st_wd2==0 and fema1 ==0:
err_no = 0
err_msg = "No Error"
#store_reg([err_no],0)
#print(328,err_no)
return(err_no)
def v_cell_d(self):
global vc_del,vc_min,vc_max,Q_Batt, V_Cells,p_genrun,p_charging,p_loadshed
vc_del = 0
vc_max = 0
vc_min = 4
i_min =0
i_max = 0
b_lim = False
for index,v in enumerate(V_Cells):
if v > 3.55:
b_lim = True
if v> vc_max:
vc_max = v
i_max = index
if v < vc_min:
vc_min = v
i_min = index
self.cell_min_voltage = vc_min
self.cell_max_voltage = vc_max
self.cell_min_no = i_min
self.cell_max_no = i_max
vc_del = vc_max - vc_min
# current control done elsewhere.
if vc_min<(self.V_C_min+0.05):
p_genrun = True
p_loadshed = True
Q_Batt = 0
elif vc_min > self.V_C_min+0.15:
p_loadshed = False
if vc_max > self.V_C_max-0.05:
p_charging = False
Q_Batt = Q_nom
elif vc_max< self.V_C_max-0.15:
p_charging = True
inpins(self)
return(b_lim)
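# Current-sense amplifier: converts the shunt ADC reading (0.305 mV/LSB around
# a 2.5 V midpoint, per the constants used below) into amps across R_shunt,
# applies the calibration offset, and hands the value to the coulomb counter.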
def CSA(xdata,self):
global R_shunt,Ai,Ai_offs
Ai = (xdata*0.000305-2.5)/R_shunt +Ai_offs
self.current = Ai
calc_Ah(Ai,self)
return(Ai)
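# Coulomb counting: integrates current over elapsed wall-clock time into Q_Batt
# (Ah), clamps it to [0, Q_nom], derives SOC and kWh charged/discharged, keeps
# a rough SOH/cycle estimate, and requests a generator run below ~20% SOC.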
def calc_Ah(Ai,self):
global Q_Batt, Q_time,Q_B_chg,Q_B_dis,Ah_b_max,Ah_b_min,\
x_soc_min,x_soc_max,x_Soc,Q_nom,SOH,kWh_chg,kWh_dis,V_bat_Sum,\
cum_bp_kwh_in,cum_bp_kwh_out,p_genrun,Q_Cycles
if Q_time == 0:
Q_time = time.time()
t_Q = time.time()
d_Qt = t_Q-Q_time
Q_time = t_Q
dQ_Batt = Ai*d_Qt/3600
Q_Batt +=dQ_Batt
if Q_Batt > Q_nom:
Q_Batt = Q_nom
if Q_Batt < 0:
Q_Batt = 0
if Q_Batt > Ah_b_max:
Ah_b_max = Q_Batt
if Q_Batt < Ah_b_min:
Ah_b_min = Q_Batt
x_Soc = Q_Batt/Q_nom*100
self.soc = x_Soc
self.capacity_remain = x_Soc*Q_nom/100
if x_Soc<20:
p_genrun = True
elif x_Soc > 35:
p_genrun = False
SOH = (1-cum_bp_kwh_out/Q_nom*0.00005)*100
Q_act = Q_nom*SOH/100
Q_Cycles = cum_bp_kwh_out/Q_nom*.00005
self.cycles = Q_Cycles
if Ai>0:
Q_B_chg += dQ_Batt
kWh_chg += dQ_Batt*V_bat_Sum/1000
cum_bp_kwh_in +=dQ_Batt*V_bat_Sum/1000
else:
Q_B_dis -= dQ_Batt
kWh_dis -= dQ_Batt*V_bat_Sum/1000
cum_bp_kwh_out-= dQ_Batt*V_bat_Sum/1000
return()
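# Converts a thermistor ADC reading back to degrees C (inverse of xtemp()),
# tracks the min/max cell temperatures on the battery object, and falls back
# to 25 C if the reading cannot be decoded.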
def gpio_decode(xdata,adr,self):
# need to add Dbus channel for device temp
global Vt_ref,Tbat,T_Cells
try:
s = float(0x3fff)/float(xdata+1)
t = math.log(s)
u = t/0.01998
T_Cells[adr] = u-12.74
except Exception as e:
print("gpio_dec",e)
print("gpio_dec",adr,"{:04x}".format(xdata))
T_Cells[adr] = 25
t_min = 100
t_max = 0
for i in range(0,4):
if T_Cells[i] > t_max:
t_max = T_Cells[i]
imax = i
if T_Cells[i] < t_min:
t_min = T_Cells[i]
imin = i
print('gpio')
self.temp1 = t_max
self.temp2 = t_min
self.temp3 = (T_Cells[5]+T_Cells[6] )/2
self.temp_max_no = imax
self.temp_min_no = imin
return()
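# Passive balancing sketch: the duty cycle scales with the cell spread and each
# cell's timer with its voltage above the minimum; the SPI writes that would
# actually start balancing are commented out in this test version, so only
# bal_count and the per-cell balance flags are updated.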
def cell_balance(V_Cells,vc_min,vc_max,self):
global bal_count,bal_stat,bal_stat2
    # need to add dbus channel for displaying balancing as 8 bit binary?
# f = spi_xfer_MAX17(1,0x80,0x00)
# bal_stat = f[3]>>14
bal_stat = 0
if bal_stat ==3:
spi_xfer_MAX17(0,0x80,0x0)
spi_xfer_MAX17(0,0x6f,0x00)
print("bal reset")
return()
if (bal_stat)&1 >0:
#print("bal run")
return() # Balancing in progress
if (bal_stat) == 2: #balancing complete
#print(511,"Bal Complete")
for i in range (0x6f,0x81):
spi_xfer_MAX17(0,i,0x00)
else:
f = spi_xfer_MAX17(0,0x80,0)
if f[0] !=0:
stat_clr()
cb_sum = 0
cb_duty = int((vc_max-vc_min-0.01)*500)
if cb_duty >15:
cb_duty = 0xf
# max_cell = (f[3]>>8)&0x07
# min_cell = f[3]&0x07
# missing read to minmax cell register?
for i in range (1,9):
Vc_t = int((V_Cells[i-1]-vc_min)/(vc_max-vc_min)*15)
if Vc_t < 0:
print(517,"<0")
Vc_t = 0 # remove -ve
if Vc_t >=0 and V_Cells[i-1]>3.35:
bal_count[i-1]+=Vc_t
self.cells[i-1].balance = 1
if bal_count[i-1]>65535:
for j in range(0,8):
bal_count[j] = bal_count[j]>>1
print("cba",i,Vc_t)
else:
Vc_t = 0
print("cb",i,Vc_t)
self.cells[i-1].balance = 0
    return() # test
# cb_sum += Vc_t
# spi_xfer_MAX17(0,0x70+i,Vc_t) #set cell timers
# f = spi_xfer_MAX17(1,0x70+i,0) # and read back
# if f[3] != Vc_t:
# print(471,"Can't set T bal :",i)
# f = spi_xfer_MAX17(1,3,0)
# print("prt write rjt",f[3]&1)
# if cb_sum !=0:
# #enable cells & start timer
# f = spi_xfer_MAX17(0,0x6f,0x1fe)
# if f[0] != 0:
# stat_clr()
# #R_bal_stat() Temporary for diagnostic
# xdata = 0x2002 | cb_duty<<4
# xdata = xdata%0xc7ff
# f = spi_xfer_MAX17(0,0x80,xdata)
# print(480,"{:04x}".format(f[3]),"{:02x}".format(f[0]))
# f = spi_xfer_MAX17(1,0x80,0)
# print(481,"{:04x}".format(f[3]),"{:02x}".format(f[0]))
# return()
def R_bal_stat():
for i in range(0x6f,0x84):
f = spi_xfer_MAX17(1,i,0x00)
print("{:02x}".format(i),"{:04x}".format(f[3]),"{:02x}".format(f[0]))
return()
def stat_clr():
for i in range(2,7):
spi_xfer_MAX17(0,i,0)
return()
def die_temp():
return(35)
global Tj,tmaxp,Fan_run_b
f= spi_xfer_MAX17(1,0x57,0) # read diag 1 register
Vptat = f[3]>>2
Vptat = Vptat/0x4000*2.3077
Tj = Vptat/0.0032+8.3-273
self.temp4 = Tj
if Tj >45:
Fan_run_b = True
elif Tj < 40:
Fan_run_b = False
return(Tj)
def inpins(self):
global chg_in,Load_in,Genrun,Fan_run_b,chg_out,p_loadshed,p_charging,p_genrun
if p_charging==True:
self.charge_fet = True
chg_out.on()
else:
self.charge_fet = False
chg_out.off()
if p_loadshed == False:
self.discharge_fet = True
load_out.on()
else:
self.discharge_fet = False
load_out.off()
p_gernun = True
if p_genrun==True :
Genrun.on()
else:
Genrun.off()
if Fan_run_b == True:
Fan_run.on()
else:
Fan_run.off()
return()
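# One polling cycle: trigger an ADC scan, read die temperature, use hard-coded
# test cell voltages (the real SPI reads are commented out), update voltages,
# current and thermistor temperatures on the battery object, then pick the poll
# interval and run balancing when the cell spread warrants it.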
def data_cycle(self):
global err_no,V_Cells,T_Cells, vc_max
#print("data_cycle")
spi_xfer_MAX17(0,0x66,0xe21)
#f = spi_xfer_MAX17(1,0x66,0x00)
#scn_dn = f[3]>>15
#dat_rdy = (f[3]&0x2000)>>13
dat_rdy = 1 #test
while dat_rdy == 0 :
f = spi_xfer_MAX17(1,0x66,0x00)
time.sleep(0.005)
scn_dn = (f[3]&0x8000)>>15
dat_rdy = (f[3]&0x2000)>>13
Tj = die_temp()
#f = spi_xfer_MAX17(1,0x66,0x00) # scan ctrl
#scn_dn = f[3]&0x8000>>15
#dat_rdy = f[3]&0x2000>>13
spi_xfer_MAX17(0,0x83,0x1)# manual xfer
f = spi_xfer_MAX17(0,0x66,0x1e28)
if f[0]> 0:
stat_clr()
#V_bat_sum = 0
#for i in range(72,0x50):
# f= spi_xfer_MAX17(1,i,0)
# v = vblk_dec((f[3]>>2),0.000305,i-72) #no change
# V_bat_sum += v
# V_Cells[i-72] = v
# cb_b = v_cell_d(self)
# time.sleep(0.005)
V_Cells = [3.0,3.1,3.2,3.3,3.4,3.5,3.55,3.55]
cb_b = v_cell_d(self)
self.voltage = 26.8 #V_bat_sum
for i in range(self.cell_count):
self.cells[i].voltage = V_Cells[i]
if (vc_del >0.015 and Ai >1.0 and err_no <16 ) or cb_b == True or vc_max>3.45:
self.poll_interval = 3000
cell_balance(V_Cells,vc_min,vc_max,self)
else:
spi_xfer_MAX17(0,0x80,0x00)
spi_xfer_MAX17(0,0x6f,0x00)
self.poll_interval = 1000
# f= spi_xfer_MAX17(1,0x47,0)
# CSA(f[3]>>2,self)
CSA(0x2014,self)
    # f= spi_xfer_MAX17(1,0x55,0) # remember to deduct V0 (AMPS) from V blk.
# vblk_dec((f[3]>>2),0.003967,22)
# f= spi_xfer_MAX17(1,0x56,0)
# vblk_dec(f[3],0.00122,2) #02
for i in range(0x59,0x5f,1):
# f= spi_xfer_MAX17(1,i,0)
# gpio_decode(f[3]>>2,i-89,self) #49-64
gpio_decode(0x2000+i,i-89,self)
time.sleep(0.005)
stat_scan(self)
print('sys tmp', self.temp_max_no)
return(True)
| 29.975083
| 93
| 0.572569
|
7f00992e6550727f70e1cc9214bfe048082a3e4e
| 119
|
py
|
Python
|
Other/IOTest.py
|
jonieson/pythonDemo
|
fde358819a43c67044b30b886c9f7dbe840d9563
|
[
"MIT"
] | 1
|
2017-08-10T08:04:48.000Z
|
2017-08-10T08:04:48.000Z
|
Other/IOTest.py
|
jonieson/pythonDemo
|
fde358819a43c67044b30b886c9f7dbe840d9563
|
[
"MIT"
] | null | null | null |
Other/IOTest.py
|
jonieson/pythonDemo
|
fde358819a43c67044b30b886c9f7dbe840d9563
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
str = raw_input('First line input:')
print 'Text you entered:',str
str = raw_input("Please enter:");
print "What you entered is: ", str
| 19.833333
| 25
| 0.605042
|
ddaedf55861c4643f9c40a3955ffe1300595f709
| 706
|
py
|
Python
|
tests/plots/colormap.py
|
ikkisoft/infnoise
|
3a63b97b0086b24a890b32990b75e812efd2c83f
|
[
"CC0-1.0"
] | 631
|
2015-01-01T23:35:30.000Z
|
2022-03-24T16:42:57.000Z
|
tests/plots/colormap.py
|
ikkisoft/infnoise
|
3a63b97b0086b24a890b32990b75e812efd2c83f
|
[
"CC0-1.0"
] | 45
|
2015-01-20T13:05:59.000Z
|
2021-11-14T15:24:00.000Z
|
tests/plots/colormap.py
|
ikkisoft/infnoise
|
3a63b97b0086b24a890b32990b75e812efd2c83f
|
[
"CC0-1.0"
] | 83
|
2015-01-23T14:49:45.000Z
|
2022-01-06T03:33:53.000Z
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib import cm
if len(sys.argv) > 1:
filename=sys.argv[1]
else:
filename='infnoise.bin'
nx = 1000
ny = 1000
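# Read nx*ny raw bytes from the TRNG capture and render them as a 2-D heat map;
# visible banding or structure would indicate bias in the generator output.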
data = np.fromfile(open(filename,'rb'), dtype=np.uint8, count=nx*ny)
data.resize(nx,ny)
plt.xlim(0, nx)
plt.ylim(0, ny)
plt.xlabel('samples')
plt.ylabel('samples')
plt.title('TRNG ' + filename)
#cax = plt.imshow(data, interpolation='nearest', cmap=cm.coolwarm)
cax = plt.imshow(data, interpolation='nearest', cmap=cm.afmhot)
cbar = plt.colorbar(cax, ticks=[255, 127, 0])
cbar.ax.set_yticklabels(['255', '127', '0'])
plt.savefig(filename + '-colormap.png')
plt.show()
| 21.393939
| 69
| 0.674221
|
bfeb24dd23387b700defcc82646cd29804c36604
| 62,591
|
py
|
Python
|
data_process.py
|
rokosbasilisk/VD-BERT
|
02240d8bf0652ed38dfb4c6d83f0ab68553a4f4b
|
[
"MIT"
] | null | null | null |
data_process.py
|
rokosbasilisk/VD-BERT
|
02240d8bf0652ed38dfb4c6d83f0ab68553a4f4b
|
[
"MIT"
] | null | null | null |
data_process.py
|
rokosbasilisk/VD-BERT
|
02240d8bf0652ed38dfb4c6d83f0ab68553a4f4b
|
[
"MIT"
] | null | null | null |
import sys, torch, json, copy, pickle, re, os, numpy as np, pprint as pp, cProfile, pstats, io, traceback, itertools
from diff import diff, get_diff, get_next_actions, build_region_specs, dict_to_tuple, is_feasible_next_placement
from diff_apps import get_type_distributions
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict, Counter
from utils import *
from dataset_filters import *
from plot_utils import plot_histogram
# MAIN CLASSES
class CwCDataset(Dataset):
""" CwC Dataset compatible with torch.utils.data.DataLoader. """
def __init__(
self, model, split, lower=False, add_builder_utterances=False, compute_diff=True, compute_perspective=True,
augment_dataset=False, augmentation_factor=0, exactly_k=False, strict=False,
data_dir="../data/corpus/logs/", gold_configs_dir="../data/corpus/gold-configurations/", save_dest_dir="../data/corpus/saved_cwc_datasets", saved_dataset_dir="../data/corpus/saved_cwc_datasets/lower-no_diff/",
encoder_vocab=None, decoder_vocab=None, dump_dataset=False, load_dataset=False, transform=None, sample_filters = [],
add_augmented_data=False, augmented_data_fraction=0.0, aug_data_dir="../../data/corpus/augmented-no-spatial/logs/", aug_gold_configs_dir="../../data/corpus/augmented-no-spatial/gold-configurations/"
):
"""
Instantiates a dataset
- If dump_dataset and load_dataset are both un-set, generates the dataset
- If dump_dataset is set, also writes the generated dataset to file
- If load_dataset is set, loads an existing dataset instead of generating (needed most often)
By dataset, we mean self.samples and self.jsons -- the former being actual train/test examples, the latter being the json log files used to obtain these samples
Args:
model (string): model for which data loader is going to be used -- only used to selectively compute some stuff
split (string): which of train/test/dev split to be used. If none, then reads and stores all data.
lower: whether the data should be lowercased.
add_builder_utterances: whether or not to obtain examples for builder utterances as well
compute_diff: whether or not to compute the diff based representations
compute_perspective: whether or not to compute the perspective coordinates based representations
save_dest_dir: where to write the generated dataset
saved_dataset_dir: where to load the saved dataset from
encoder_vocab: encoder vocabulary wrapper.
decoder_vocab: decoder vocabulary wrapper.
dump_dataset: whether to generate dataset and write to file
load_dataset: whether to load dataset from file
"""
self.model = model
self.split = split
self.lower = lower
self.augmentation_factor = augmentation_factor
self.add_builder_utterances = add_builder_utterances
self.compute_diff = compute_diff
self.compute_perspective = compute_perspective
self.exactly_k = exactly_k
self.strict = strict
self.add_augmented_data = add_augmented_data
self.augmented_data_fraction = augmented_data_fraction
self.decoder_vocab = decoder_vocab
self.encoder_vocab = encoder_vocab
self.transform = transform
self.num_prev_utterances = 1
self.blocks_max_weight = 1
self.use_builder_actions = False
self.num_next_actions = 2
self.include_empty_channel = False
self.feasible_next_placements = False
self.use_condensed_action_repr = False
self.action_type_sensitive = False
self.spatial_info_window_size = 1000
self.counters_extra_feasibility_check = False
self.use_existing_blocks_counter = False
self.src_input_size_configs = x_range + y_range + z_range + len(type2id)
self.src_input_size_next_actions = (x_range + y_range + z_range if not self.use_condensed_action_repr else 3) + len(type2id) + (len(action2id) if self.action_type_sensitive else 0)
        self.online_data = False # whether this is for the architect demo (online mode) or not
cwc_datasets_path = save_dest_dir
lower_str = "lower" if self.lower else ""
add_builder_utterances_str = "-add_builder_utterances" if self.add_builder_utterances else ""
diff_str = '-no_diff' if not self.compute_diff else ""
pers_str = '-no_perspective_coords' if not self.compute_perspective else ""
aug_str = "-augmented" if self.add_augmented_data else ""
if True:
if load_dataset:
dataset_dir = saved_dataset_dir
print("Loading dataset ...\n")
print("Loading self.samples ...")
self.samples = load_pkl_data(dataset_dir + "/"+ self.split + "-samples.pkl")
self.filter_augmented_samples()
print("Loading self.jsons ...")
self.jsons = load_pkl_data(dataset_dir + "/"+ self.split + "-jsons.pkl")
print("Done! Loaded dataset of size", len(self.samples))
else:
self.jsons = list(
map(
remove_empty_states,
map(
reorder,
get_logfiles_with_gold_config(data_dir, gold_configs_dir, split)
)
)
                ) # TODO: Move the extra maps to a postprocessing step for the dataset?
if self.add_augmented_data:
print(timestamp(), "Adding augmented dataset...")
def reformat_utterances(aug_observations_json):
"""
Joins tokens back with a space
"""
for world_state in aug_observations_json["WorldStates"]:
world_state["ChatHistoryTokenized"] = list(map(
lambda x: " ".join(x), world_state["ChatHistoryTokenized"]
))
world_state["ChatHistory"] = world_state.pop("ChatHistoryTokenized")
return aug_observations_json
self.jsons += list(
map(
remove_empty_states,
map(
reorder,
map(
reformat_utterances,
get_logfiles_with_gold_config(aug_data_dir, aug_gold_configs_dir, split, from_aug_data=True)
)
)
)
)
print(timestamp(), 'Started processing jsons to get samples...')
self.samples = self.process_samples(lower, compute_diff=self.compute_diff, compute_perspective=self.compute_perspective)
print(timestamp(), 'Done processing jsons to get samples.')
print("Current dataset size", len(self.samples))
print("Filtering...")
for sample_filter in sample_filters:
self.samples = list(filter(sample_filter, self.samples))
print("Done! Loaded vanilla dataset of size", len(self.samples))
if dump_dataset:
sample_filters_names = list(map(lambda x: x.__name__, sample_filters))
sample_filters_names = "-" + "-".join(sample_filters_names) if sample_filters_names else ""
dataset_dir = lower_str + add_builder_utterances_str + diff_str + pers_str + aug_str + sample_filters_names
dataset_dir = os.path.join(cwc_datasets_path, dataset_dir)
if not os.path.exists(dataset_dir):
os.makedirs(dataset_dir)
print("Saving dataset ...\n")
print("Saving self.jsons ...")
save_pkl_data(dataset_dir + "/"+ self.split + "-jsons.pkl", self.jsons)
save_pkl_data(dataset_dir + "/"+ self.split + "-jsons-2.pkl", self.jsons, protocol=2)
print("Saving self.samples ...")
save_pkl_data(dataset_dir + "/"+ self.split + "-samples.pkl", self.samples)
self.augmentation_factor = 0
def set_args(self, num_prev_utterances=1, blocks_max_weight=1, use_builder_actions=False, num_next_actions=2, include_empty_channel=False, use_condensed_action_repr=False, action_type_sensitive=False, feasible_next_placements=False, spatial_info_window_size=1000, counters_extra_feasibility_check=False, use_existing_blocks_counter=False):
"""
Selectively set some args in the object
"""
self.num_prev_utterances = num_prev_utterances
self.blocks_max_weight = blocks_max_weight
self.use_builder_actions = use_builder_actions
self.num_next_actions = num_next_actions
self.include_empty_channel = include_empty_channel
self.feasible_next_placements = feasible_next_placements
self.use_condensed_action_repr = use_condensed_action_repr
self.action_type_sensitive = action_type_sensitive
self.spatial_info_window_size = spatial_info_window_size
self.counters_extra_feasibility_check = counters_extra_feasibility_check
self.use_existing_blocks_counter = use_existing_blocks_counter
self.src_input_size_next_actions = (x_range + y_range + z_range if not self.use_condensed_action_repr else 3) + len(type2id) + (len(action2id) if self.action_type_sensitive else 0)
def get_sample(self, idx):
""" Returns one data sample (utterance) in tokenized form. """
return self.samples[idx]
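    # Splits the loaded samples into original vs. augmented and randomly keeps
    # only the requested fraction of the augmented ones.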
def filter_augmented_samples(self):
samples = {'orig': [], 'aug': []}
for sample in self.samples:
samples['orig'].append(sample) if not sample.get('from_aug_data') else samples['aug'].append(sample)
print('\nOriginal dataset contains', len(samples['orig']), 'original samples and', len(samples['aug']), 'augmented samples ('+str(len(samples['orig'])+len(samples['aug'])), 'total samples).')
if self.augmented_data_fraction > 0 and len(samples['aug']) == 0:
print('Error: you specified a fraction of augmented data, but the loaded dataset contains no augmented data.')
sys.exit(0)
if self.augmented_data_fraction == 0 and len(samples['aug']) == 0:
return
if self.augmented_data_fraction < 1.0:
print('Filtering augmented samples with a fraction of', self.augmented_data_fraction, '...')
chosen_aug_samples = np.random.choice(samples['aug'], int(self.augmented_data_fraction*len(samples['aug'])), replace=False)
print('Randomly sampled', len(chosen_aug_samples), 'augmented samples from the full augmented set.')
self.samples = samples['orig']
self.samples.extend(chosen_aug_samples)
def process_samples(self, lower, compute_diff=True, compute_perspective=True):
""" Preprocesses the input JSONs and generates a list of data samples. """
samples = []
try:
for j in range(len(self.jsons)):
print("Processing json", j, "of", len(self.jsons))
try:
js = self.jsons[j]
world_states = js["WorldStates"]
final_observation = world_states[-1]
gold_config = js["gold_config_structure"]
last_world_state = None
chat_history = []
chat_with_actions_history = []
gold_placements, gold_removals = get_gold_actions(world_states)
for i in range(1, len(world_states)):
observation = world_states[i]
built_config = get_built_config(observation)
builder_position = get_builder_position(observation)
last_action = None
gold_placement_list = gold_placements[i]
gold_removal_list = gold_removals[i]
for k, curr_world_state in enumerate(reversed(world_states[:i+1])):
original_index = i-k
# compare blocks with its prev world state
curr_blocks = curr_world_state["BlocksInGrid"]
prev_blocks = [] if original_index == 0 else world_states[original_index-1]["BlocksInGrid"]
last_action = get_last_action(curr_blocks, prev_blocks)
if last_action:
break
if not last_world_state:
for i2 in range(len(observation["ChatHistory"])):
chat_history.append(observation["ChatHistory"][i2].strip())
for block in built_config:
chat_with_actions_history.append({"idx": i, "action": "putdown", "type": block["type"], "built_config": built_config, "prev_config": None, "builder_position": builder_position, "last_action": last_action, "gold_placement_list": gold_placement_list, "gold_removal_list": gold_removal_list})
chat_with_actions_history.append({"idx": i, "action": "chat", "utterance": observation["ChatHistory"][i2].strip(), "built_config": built_config, "prev_config": None, "builder_position": builder_position, "last_action": last_action, "gold_placement_list": gold_placement_list, "gold_removal_list": gold_removal_list})
else:
prev_config = get_built_config(last_world_state)
config_diff = diff(gold_config=built_config, built_config=prev_config)
delta = {"putdown": config_diff["gold_minus_built"], "pickup": config_diff["built_minus_gold"]}
for action_type in delta:
for block in delta[action_type]:
chat_with_actions_history.append({"idx": i, "action": action_type, "type": block["type"], "built_config": built_config, "prev_config": prev_config, "builder_position": builder_position, "last_action": last_action, "gold_placement_list": gold_placement_list, "gold_removal_list": gold_removal_list})
if len(observation["ChatHistory"]) > len(last_world_state["ChatHistory"]):
for i3 in range(len(last_world_state["ChatHistory"]), len(observation["ChatHistory"])):
chat_history.append(observation["ChatHistory"][i3].strip())
chat_with_actions_history.append({"idx": i, "action": "chat", "utterance": observation["ChatHistory"][i3].strip(), "built_config": built_config, "prev_config": prev_config, "builder_position": builder_position, "last_action": last_action, "gold_placement_list": gold_placement_list, "gold_removal_list": gold_removal_list})
last_world_state = observation
# process dialogue line-by-line
for i in range(len(chat_with_actions_history)):
elem = chat_with_actions_history[i]
if elem['action'] != 'chat':
continue
idx = elem['idx']
line = elem['utterance']
built_config = elem["built_config"]
prev_config = elem["prev_config"]
builder_position = elem["builder_position"]
last_action = append_block_perspective_coords(builder_position, elem["last_action"])
gold_placement_list = [append_block_perspective_coords(builder_position, block) for block in elem["gold_placement_list"]]
gold_removal_list = [append_block_perspective_coords(builder_position, block) for block in elem["gold_removal_list"]]
speaker = "Architect" if "Architect" in line.split()[0] else "Builder"
if not self.add_builder_utterances and speaker == 'Builder':
continue
def valid_config(config):
if not config:
return True
for block in config:
x, y, z = block["x"]-x_min, block["y"]-y_min, block["z"]-z_min
if x < 0 or x >= x_range or y < 0 or y >= y_range or z < 0 or z >= z_range:
return False
return True
# temporary fix for troublesome configs
if not valid_config(built_config) or not valid_config(prev_config):
continue
prefix = architect_prefix if speaker == "Architect" else builder_prefix
next_utterance = line[len(prefix):]
#next_tokenized, _ = tokenize(next_utterance.lower() if lower else next_utterance)
next_tokenized = next_utterance # no tokenization
prev_utterances = []
prev_utterances.append({'speaker': 'Builder', 'utterance': ['<dialogue>']})
for k in range(i):
prev_elem = chat_with_actions_history[k]
if prev_elem['action'] != 'chat':
prev_utterances.append({'speaker': 'Builder', 'utterance': ['<builder_'+prev_elem['action']+'_'+prev_elem['type']+'>']})
else:
prev_utterance = prev_elem['utterance']
prev_speaker = "Architect" if "Architect" in prev_utterance.split()[0] else "Builder"
prev_utterance = prev_utterance[len(architect_prefix):] if prev_speaker == 'Architect' else prev_utterance[len(builder_prefix):]
prev_utterance = prev_utterance.lower() if lower else prev_utterance
prev_utterances.append({'speaker': prev_speaker, 'utterance': prev_utterance})
# diff
gold_v_built_diff, diffs_built_config_space, type_distributions_built_config_space, type_distributions_gold_config_space = None, None, None, None
if compute_diff:
gold_v_built_diff, perturbations_and_diffs = get_diff(gold_config=gold_config, built_config=built_config)
# get type distributions
diffs_built_config_space = list(map(lambda x: x.diff.diff_built_config_space, perturbations_and_diffs))
type_distributions_built_config_space = reformat_type_distributions(
get_type_distributions(diffs_built_config_space=diffs_built_config_space, built_config=built_config)
)
# reverse diff
_, perturbations_and_diffs_reverse = get_diff(gold_config=built_config, built_config=gold_config)
# get type distributions
diffs_gold_config_space = list(map(lambda x: x.diff.diff_built_config_space, perturbations_and_diffs_reverse))
type_distributions_gold_config_space = reformat_type_distributions(
get_type_distributions(diffs_built_config_space=diffs_gold_config_space, built_config=gold_config)
)
perspective_coordinates = None if not compute_perspective else get_perspective_coord_repr(builder_position)
samples.append(
{
'next_speaker': speaker, # architect or builder
'next_utterance': next_utterance, # utterance to be predicted
'prev_utterances': prev_utterances, # previous utterances
'gold_config': gold_config,
'built_config': built_config,
'diff': gold_v_built_diff, # diff based on one single optimal alignment
'last_action': last_action, # last block placement action
'gold_placement_list': gold_placement_list,
'gold_removal_list': gold_removal_list,
'builder_position': builder_position,
'perspective_coordinates': perspective_coordinates,
'type_distributions_built_config_space': type_distributions_built_config_space,
'type_distributions_gold_config_space': type_distributions_gold_config_space,
'from_aug_data': js['from_aug_data'],
'diffs_built_config_space': diffs_built_config_space, # all diffs based on all optimal alignments -- in the built config space
'json_id': j, # ID of the json this sample was obtained from
'sample_id': idx # ID assigned to this sample
} # NOTE: data format of a sample
)
except Exception:
print('Something went wrong processing this json, skipping...')
traceback.print_exc()
sys.exit(0)
except KeyboardInterrupt:
print('Exiting from processing json early... Not all samples have been added.')
return samples
def __len__(self):
""" Returns length of dataset. """
return len(self.samples)
def __getitem__(self, idx):
""" Computes the tensor representations of a sample """
sample = self.samples[idx]
# Convert utterance (string) to word IDs.
next_tokens = sample["next_utterance"]
next_utterance_input = []
next_utterance_input.append('<architect>') # NOTE: no need to change for LM using builder utterances too
next_utterance_input.extend([token for token in next_tokens])
next_utterance_output = []
next_utterance_output.extend([token for token in next_tokens])
next_utterance_output.append('</architect>') # NOTE: no need to change for LM using builder utterances too
i = 0
utterances_idx = len(sample["prev_utterances"])-1
utterances_to_add = []
prev_utterances = []
while i < self.num_prev_utterances:
if utterances_idx < 0:
break
prev = sample["prev_utterances"][utterances_idx]
speaker = prev["speaker"]
utterance = prev["utterance"]
if "<builder_" in utterance[0]:
if self.use_builder_actions:
utterances_to_add.insert(0, prev)
i -= 1
elif "mission has started ." in " ".join(utterance) and 'Builder' in speaker:
i -= 1
else:
utterances_to_add.insert(0, prev)
utterances_idx -= 1
i += 1
if self.online_data:
# use only one previous utterance for architect demo
utterances_to_add = [utterances_to_add[-1]]
for prev in utterances_to_add:
speaker = prev["speaker"]
utterance = prev["utterance"]
if "<dialogue>" in utterance[0]:
prev_utterances.append('<dialogue>')
elif "<builder_" in utterance[0]:
if self.use_builder_actions:
prev_utterances.append(utterance[0])
i -= 1
else:
start_token = '<architect>' if 'Architect' in speaker else '<builder>'
end_token = '</architect>' if 'Architect' in speaker else '</builder>'
prev_utterances.append(start_token)
prev_utterances.extend(token for token in utterance)
prev_utterances.append(end_token)
# temporary fix: floats in configs
for config_type in ['built_config', 'gold_config']:
config = sample[config_type]
for block in config:
for key in ['x', 'y', 'z']:
block[key] = int(block[key])
# built config
built_config = sample["built_config"]
built_config_repr = get_one_hot_repr(built_config) if self.model == 'seq2seq_world_state' else None
built_config_3d_repr = get_3d_repr(built_config, max_weight=self.blocks_max_weight, include_empty_channel=self.include_empty_channel) if self.model == 'cnn_3d' else None
# gold config
gold_config = sample["gold_config"] # NOTE: already sorted by layer
gold_config_repr = get_one_hot_repr(gold_config) if self.model == 'seq2seq_world_state' else None
gold_config_3d_repr = get_3d_repr(gold_config, include_empty_channel=self.include_empty_channel) if self.model == 'cnn_3d' else None
perspective_coord_repr = None
if isinstance(sample["perspective_coordinates"], np.ndarray):
perspective_coord_repr = torch.from_numpy(sample["perspective_coordinates"]).type(torch.FloatTensor)
type_distributions_built_config_space = sample['type_distributions_built_config_space']
type_distributions_gold_config_space = sample['type_distributions_gold_config_space']
built_config_type_dist, gold_config_type_dist = None, None
if isinstance(type_distributions_built_config_space, np.ndarray):
if not self.include_empty_channel:
type_distributions_built_config_space = type_distributions_built_config_space[:-1][:][:][:]
type_distributions_gold_config_space = type_distributions_gold_config_space[:-1][:][:][:]
built_config_type_dist = torch.from_numpy(type_distributions_built_config_space).type(torch.FloatTensor)
gold_config_type_dist = torch.from_numpy(type_distributions_gold_config_space).type(torch.FloatTensor)
# diff
diff = sample["diff"]
# last action
last_action = sample["last_action"]
next_actions_gold = {"gold_minus_built": sample["gold_placement_list"][:int(self.num_next_actions/2)], "built_minus_gold": sample["gold_removal_list"][:int(self.num_next_actions/2)]}
next_actions_gold_repr = get_next_actions_repr(next_actions_gold, last_action, action_type_sensitive=self.action_type_sensitive, use_condensed_action_repr=self.use_condensed_action_repr)
# next actions
next_actions, next_actions_repr = None, None
builder_position = sample['builder_position']
if diff and self.model == 'utterances_and_next_actions':
next_actions = get_next_actions(all_next_actions=diff, num_next_actions_needed=self.num_next_actions, last_action=last_action, built_config=built_config, feasible_next_placements=self.feasible_next_placements)
next_actions['gold_minus_built'] = [append_block_perspective_coords(builder_position, block) for block in next_actions['gold_minus_built']]
next_actions['built_minus_gold'] = [append_block_perspective_coords(builder_position, block) for block in next_actions['built_minus_gold']]
next_actions_repr = get_next_actions_repr(next_actions, last_action, action_type_sensitive=self.action_type_sensitive, use_condensed_action_repr=self.use_condensed_action_repr)
# block global counters
diffs_built_config_space = sample["diffs_built_config_space"]
block_counters = get_block_counters(diffs_built_config_space, built_config=built_config, built_config_in_region=built_config, extra_check=self.counters_extra_feasibility_check) if diff else None
# block region counters
block_counters_spatial_info = get_block_counters_spatial_info(diffs_built_config_space, built_config, last_action, builder_position, window_size=self.spatial_info_window_size, extra_check=self.counters_extra_feasibility_check) if diff else None
block_counters_spatial_tensors = [] # FIXME: UPDATE WHEN MORE REGIONS ARE CONSIDERED
if block_counters_spatial_info:
if self.use_existing_blocks_counter:
block_counters_spatial_tensors = [(torch.Tensor(x.block_counters.all_placements_counter), torch.Tensor(x.block_counters.all_next_placements_counter), torch.Tensor(x.block_counters.all_removals_counter), torch.Tensor(x.block_counters.all_existing_blocks_counter)) for x in block_counters_spatial_info]
else:
block_counters_spatial_tensors = [(torch.Tensor(x.block_counters.all_placements_counter), torch.Tensor(x.block_counters.all_next_placements_counter), torch.Tensor(x.block_counters.all_removals_counter)) for x in block_counters_spatial_info]
last_action_bit = [[1]] if not last_action else [[0]]
# pp.pprint(list(map(lambda x: x.__dict__, block_counters_spatial_info)))
# print(block_counters_spatial_tensors)
# print("\n\n\n\n")
# print("get_item")
# pp.PrettyPrinter(indent=4).pprint(prev_utterances)
# print(sorted(type2id.keys()))
# print(block_counters.all_placements_counter[0])
# print(block_counters.all_removals_counter[0])
from operator import add
all_actions = list( map(add, block_counters.all_placements_counter[0], block_counters.all_removals_counter[0]) )
colors_to_all_actions = dict(zip(sorted(type2id.keys()), all_actions))
return (
prev_utterances,
torch.tensor(built_config_repr) if built_config_repr else None,
torch.tensor(gold_config_repr) if gold_config_repr else None,
next_utterance_input, # utterance to be predicted -- formatted for decoder inputs
next_utterance_output, # utterance to be predicted -- formatted for decoder outputs
torch.Tensor(next_actions_repr["next_placements_repr"]) if next_actions_repr else None,
torch.Tensor(next_actions_repr["next_removals_repr"]) if next_actions_repr else None,
torch.Tensor(next_actions_gold_repr["next_placements_repr"]),
torch.Tensor(next_actions_gold_repr["next_removals_repr"]),
torch.Tensor(block_counters.all_placements_counter) if block_counters else None, # global block counters
torch.Tensor(block_counters.all_next_placements_counter) if block_counters else None, # global block counters
torch.Tensor(block_counters.all_removals_counter) if block_counters else None, # global block counters
block_counters_spatial_tensors, # regional block counters
torch.Tensor(last_action_bit), # encoding of last action
RawInputs(next_actions, next_actions_gold, json_id=sample.get('json_id'), sample_id=sample.get('sample_id'), next_utterance_raw=sample.get('next_utterance_raw'), built_config_ss=built_config, gold_config_ss=gold_config, colors_to_all_actions=colors_to_all_actions), # raw inputs for downstream purposes
built_config_3d_repr,
gold_config_3d_repr,
perspective_coord_repr,
built_config_type_dist,
gold_config_type_dist
) # NOTE: data format of an item
def collate_fn(self, data):
"""Creates a mini-batch of items (batch size = 1 for now)
Returns:
A tuple of the following:
- inputs to the encoder
- ground truth inputs to the decoder RNN
- ground truth outputs for the decoder RNN
- some inputs in raw format for downstream use cases
"""
def merge_text(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
def merge_configs(sequences):
if not isinstance(sequences[0], torch.Tensor) and not sequences[0]:
return None, None
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths), sequences[0].size()[1])
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end, :] = seq
return padded_seqs, lengths
def stack_reprs(reprs):
if not isinstance(reprs[0], torch.Tensor) and not reprs[0]:
return None
return torch.stack(reprs, 0)
# Sort a data list by utterance length in descending order.
prev_utterances, built_configs, gold_configs, target_inputs, target_outputs, next_placements, next_removals, gold_placements, gold_removals, all_placements_counter, all_next_placements_counter, all_removals_counter, block_counters_spatial_tensors, last_action_bits, raw_inputs, built_configs_3d, gold_configs_3d, perspective_coord_reprs, built_config_type_dists, gold_config_type_dists = zip(*data)
#prev_utterances, prev_utterances_lengths = merge_text(prev_utterances)
built_configs, built_config_lengths = merge_configs(built_configs)
gold_configs, gold_config_lengths = merge_configs(gold_configs)
next_placements, next_placements_lengths = merge_configs(next_placements)
next_removals, next_removals_lengths = merge_configs(next_removals)
gold_placements, gold_placements_lengths = merge_configs(gold_placements)
gold_removals, gold_removals_lengths = merge_configs(gold_removals)
all_placements_counter = stack_reprs(all_placements_counter)
all_next_placements_counter = stack_reprs(all_next_placements_counter)
all_removals_counter = stack_reprs(all_removals_counter)
next_actions = {
"next_placements": next_placements,
"next_removals": next_removals
}
next_actions_lengths = {
"next_placements_lengths": next_placements_lengths,
"next_removals_lengths": next_removals_lengths
}
gold_actions = {
"gold_placements": gold_placements,
"gold_removals": gold_removals
}
gold_actions_lengths = {
"gold_placements_lengths": gold_placements_lengths,
"gold_removals_lengths": gold_removals_lengths
}
block_counters = {
"all_placements_counter": all_placements_counter,
"all_next_placements_counter": all_next_placements_counter,
"all_removals_counter": all_removals_counter
}
block_counters_spatial_tensors = block_counters_spatial_tensors[0]
if self.use_existing_blocks_counter:
block_counters_spatial_tensors = [(w.unsqueeze(0), x.unsqueeze(0), y.unsqueeze(0), z.unsqueeze(0)) for (w,x,y,z) in block_counters_spatial_tensors]
else:
block_counters_spatial_tensors = [(x.unsqueeze(0), y.unsqueeze(0), z.unsqueeze(0)) for (x,y,z) in block_counters_spatial_tensors]
last_action_bits = stack_reprs(last_action_bits)
built_configs_3d = stack_reprs(built_configs_3d)
gold_configs_3d = stack_reprs(gold_configs_3d)
perspective_coord_reprs = stack_reprs(perspective_coord_reprs)
built_config_type_dists = stack_reprs(built_config_type_dists)
gold_config_type_dists = stack_reprs(gold_config_type_dists)
#target_inputs, target_lengths = merge_text(target_inputs)
#target_outputs, target_lengths = merge_text(target_outputs)
raw_inputs = raw_inputs[0]
return (
EncoderInputs(prev_utterances,built_configs, built_config_lengths, gold_configs, gold_config_lengths, next_actions, next_actions_lengths, gold_actions, gold_actions_lengths, block_counters, block_counters_spatial_tensors, last_action_bits, built_configs_3d, gold_configs_3d, perspective_coord_reprs, built_config_type_dists, gold_config_type_dists),
DecoderInputs(target_inputs),
DecoderOutputs(target_outputs),raw_inputs)
def get_data_loader(self, batch_size=1, shuffle=True, num_workers=1):
# Data loader for CwC Dataset.
# This will return (targets, lengths) for every iteration.
# targets: torch tensor of shape (batch_size, padded_length).
# lengths: list of valid lengths for each padded utterance, sorted in descending order. Length is (batch_size).
return DataLoader(dataset=self, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=self.collate_fn)
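    # Typical usage (sketch only; paths and arguments are illustrative):
    #   dataset = CwCDataset(model='cnn_3d', split='train', lower=True,
    #                        load_dataset=True,
    #                        saved_dataset_dir='../data/corpus/saved_cwc_datasets/lower-no_diff/')
    #   dataset.set_args(num_prev_utterances=3)
    #   loader = dataset.get_data_loader(batch_size=1, shuffle=True)
    #   encoder_inputs, decoder_inputs, decoder_outputs, raw = next(iter(loader))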
# FIXME: Move this to a dedicated data augmentation part
def augment_dataset(self, k, vocab_dir="../vocabulary/"):
# data structures to hold info on the data augmentation
self.word_counts_augmentation = defaultdict(int)
self.tokenized_data_augmentation = []
# generate synthetic utterances -- update above data structures
for original_sample in self.samples:
tokenized_utterance = original_sample["next_utterance"]
# get new examples
new_examples = self.get_new_examples(tokenized_utterance, k)
# add to dataset
self.tokenized_data_augmentation += new_examples
for example in new_examples:
for word in example:
self.word_counts_augmentation[word] += 1
# augment original dataset with the synthetic utterances
for tokenized_synthetic_utterance in self.tokenized_data_augmentation:
synthetic_sample = {
'next_speaker': self.samples[0]['next_speaker'], # dummy value
'next_utterance': tokenized_synthetic_utterance,
'prev_utterances': self.samples[0]['prev_utterances'], # dummy value
'gold_config': self.samples[0]['gold_config'], # dummy value
'built_config': self.samples[0]['built_config'] # dummy value
}
self.samples.append(synthetic_sample)
# write synthetic utterances to file
print("Writing synthetic utterances to file...")
split = "-" + self.split
lower = "-lower" if self.lower else ""
add_builder_utterances = "-add_builder_utterances" if self.add_builder_utterances else ""
augmentation_factor = "-" + str(self.augmentation_factor)
with open(vocab_dir+"/synthetic_utterances" + split + lower + add_builder_utterances + augmentation_factor + ".txt", 'w') as f:
for tokenized_utterance in self.tokenized_data_augmentation:
to_write = pprint.pformat(tokenized_utterance) + "\n\n"
f.write(to_write)
print("Done writing!")
# FIXME: Move this to a dedicated data augmentation part
def get_new_examples(self, tokenized_utterance, k):
def f(token):
# map token to list of all possible substitutions
token_substitutions = [token]
if token in self.substitutions:
token_substitutions += self.substitutions[token]
return token_substitutions
# map each token to a list of its substitutions, including itself
substitutions_list = list(map(f, tokenized_utterance))
# generate all possible combinations -- cartesian product of a 2d list
samples_list = list(map(lambda x: np.random.choice(x, size=k, replace=True).tolist(), substitutions_list))
new_examples = list(map(list, [*zip(*samples_list)]))
# filter out duplicate examples
new_examples = list(filter(lambda x: x != tokenized_utterance, new_examples)) # filter out original utterance
new_examples = [list(x) for x in set(tuple(x) for x in new_examples)] # select only unique new synthetic utterances
return new_examples
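# Illustrative sketch (hypothetical substitutions): if self.substitutions maps "red" to
# ["orange"], then for tokenized_utterance = ["place", "a", "red", "block"] each token is
# sampled k times from its substitution list, so one synthetic example could be
# ["place", "a", "orange", "block"]; the original utterance and duplicate samples are
# filtered out before returning.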
def printCoords(self):
print(self.dpxs_placement)
class EncoderInputs:
def __init__(self, prev_utterances, built_configs,built_config_lengths, gold_configs, gold_config_lengths, next_actions, next_actions_lengths, gold_actions, gold_actions_lengths, block_counters, block_counters_spatial_tensors, last_action_bits, built_configs_3d, gold_configs_3d, perspective_coord_reprs, built_config_type_dists, gold_config_type_dists):
self.prev_utterances = prev_utterances # previous utterances
self.built_configs = built_configs # built config
self.built_config_lengths = built_config_lengths
self.gold_configs = gold_configs # gold config
self.gold_config_lengths = gold_config_lengths
self.next_actions = next_actions
self.next_actions_lengths = next_actions_lengths
self.gold_actions = gold_actions
self.gold_actions_lengths = gold_actions_lengths
self.block_counters = block_counters # global block counters
self.block_counters_spatial_tensors = block_counters_spatial_tensors # regional block counters
self.last_action_bits = last_action_bits # last action encoding
self.built_configs_3d = built_configs_3d
self.gold_configs_3d = gold_configs_3d
self.perspective_coord_reprs = perspective_coord_reprs
self.built_config_type_dists = built_config_type_dists
self.gold_config_type_dists = gold_config_type_dists
class DecoderInputs:
def __init__(self, target_inputs,target_inputs_neg=None):
self.target_inputs = target_inputs # ground truth inputs for decoder RNN
self.target_inputs_neg = target_inputs_neg
class DecoderOutputs:
def __init__(self, target_outputs,target_outputs_neg=None):
self.target_outputs = target_outputs # ground truth outputs for decoder RNN
self.target_outputs_neg = target_outputs_neg
class RawInputs:
"""
Raw representations of various inputs for downstream use cases
"""
def __init__(self, next_actions_raw, gold_next_actions_raw, json_id=None, sample_id=None, next_utterance_raw=None, built_config_ss=None, gold_config_ss=None, colors_to_all_actions=None):
self.next_actions_raw = next_actions_raw
self.gold_next_actions_raw = gold_next_actions_raw
self.json_id = json_id # json id of the game log from where this train/test example was obtained
self.sample_id = sample_id # sample id of the train/test example
self.next_utterance_raw = next_utterance_raw # next utterance to be predicted
self.built_config_ss = built_config_ss
self.gold_config_ss = gold_config_ss
self.colors_to_all_actions = colors_to_all_actions
# UTILS
class Region:
"""
Stores a specific region in 3d space
"""
def __init__(self, x_min, x_max, y_min, y_max, z_min, z_max, block_counters=None, region_id=None):
"""
- Bounds of the region
- Block counters for the region
- A unique ID based on whether the region is left, right, etc. of the last action
"""
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.z_min = z_min
self.z_max = z_max
assert self.x_min <= self.x_max and self.y_min <= self.y_max and self.z_min <= self.z_max, "Invalid x/y/z bounds for Region object."
self.block_counters = block_counters
self.region_id = region_id
def set_block_counters(self, diffs_built_config_space, built_config, extra_check):
"""
Compute and set block counters for region
"""
# filter actions in diffs to only include actions within the region
region_diffs = list(map(lambda x: self.get_region_diff(x), diffs_built_config_space))
# filter blocks in built config to only include blocks within the region
built_config_in_region = list(filter(lambda x: self.is_in_region(x), built_config))
# get block counters
self.block_counters = get_block_counters(region_diffs, built_config=built_config, built_config_in_region=built_config_in_region, extra_check=extra_check)
def get_region_diff(self, diff):
"""
Restrict a diff to the actions that fall within the region
"""
all_placements = diff["gold_minus_built"]
all_removals = diff["built_minus_gold"]
placements_in_region = list(filter(lambda x: self.is_in_region(x), all_placements))
removals_in_region = list(filter(lambda x: self.is_in_region(x), all_removals))
region_diff = {
"gold_minus_built": placements_in_region,
"built_minus_gold": removals_in_region
}
return region_diff
def is_in_region(self, action):
"""
Check if an action is within the region or not
"""
return action["x"] in range(self.x_min, self.x_max + 1) and \
action["y"] in range(self.y_min, self.y_max + 1) and \
action["z"] in range(self.z_min, self.z_max + 1)
def get_region_id(self, builder_position, last_action):
"""
Compute the unique ID for the region based on whether it is left, right, etc. of the last action
"""
# discretize builder's yaw into the 4 canonical directions
builder_yaw = builder_position["yaw"]
builder_yaw_discrete = discretize_yaw(builder_yaw)
# given a canonical yaw direction and delta vectors wrt that direction, infer which cell is left, which is right and so on
diff_vector = {
"x": (self.x_max + self.x_min) / 2 - last_action["x"], # diff using mean of region
"y": (self.y_max + self.y_min) / 2 - last_action["y"],
"z": (self.z_max + self.z_min) / 2 - last_action["z"]
}
# infer what is left, right, etc. based on canonical yaw direction
if builder_yaw_discrete == 0:
diff_vector_to_direction = {
"+x": "left",
"-x": "right",
"+z": "front",
"-z": "back"
}
elif builder_yaw_discrete == 90:
diff_vector_to_direction = {
"+x": "back",
"-x": "front",
"+z": "left",
"-z": "right"
}
elif builder_yaw_discrete == 180:
diff_vector_to_direction = {
"+x": "right",
"-x": "left",
"+z": "back",
"-z": "front"
}
elif builder_yaw_discrete == -90:
diff_vector_to_direction = {
"+x": "front",
"-x": "back",
"+z": "right",
"-z": "left"
}
diff_vector_to_direction["+y"] = "top"
diff_vector_to_direction["-y"] = "down"
# convert diff vector to one left, right, etc. and then convert to a unique id
# when last action cell itself
if diff_vector["x"] == 0 and diff_vector["y"] == 0 and diff_vector["z"] == 0:
self.region_id = direction_to_id["null"]
# when adjacent cells or rows/columns
if diff_vector["x"] != 0 and diff_vector["y"] == 0 and diff_vector["z"] == 0:
if diff_vector["x"] > 0:
if diff_vector["x"] == 1:
self.region_id = direction_to_id[diff_vector_to_direction["+x"]]
else:
self.region_id = direction_to_id[diff_vector_to_direction["+x"] + "_row"]
else:
if diff_vector["x"] == -1:
self.region_id = direction_to_id[diff_vector_to_direction["-x"]]
else:
self.region_id = direction_to_id[diff_vector_to_direction["-x"] + "_row"]
elif diff_vector["x"] == 0 and diff_vector["y"] != 0 and diff_vector["z"] == 0:
if diff_vector["y"] > 0:
if diff_vector["y"] == 1:
self.region_id = direction_to_id["top"]
else:
self.region_id = direction_to_id["top_column"]
else:
if diff_vector["y"] == -1:
self.region_id = direction_to_id["down"]
else:
self.region_id = direction_to_id["down_column"]
elif diff_vector["x"] == 0 and diff_vector["y"] == 0 and diff_vector["z"] != 0:
if diff_vector["z"] > 0:
if diff_vector["z"] == 1:
self.region_id = direction_to_id[diff_vector_to_direction["+z"]]
else:
self.region_id = direction_to_id[diff_vector_to_direction["+z"] + "_row"]
else:
if diff_vector["z"] == -1:
self.region_id = direction_to_id[diff_vector_to_direction["-z"]]
else:
self.region_id = direction_to_id[diff_vector_to_direction["-z"] + "_row"]
# when adjacent quadrants
if diff_vector["x"] != 0 and diff_vector["y"] != 0 and diff_vector["z"] == 0:
signed_x = "+x" if diff_vector["x"] > 0 else "-x"
signed_y = "+y" if diff_vector["y"] > 0 else "-y"
self.region_id = direction_to_id[
stringify((diff_vector_to_direction[signed_x], diff_vector_to_direction[signed_y]))
]
elif diff_vector["x"] == 0 and diff_vector["y"] != 0 and diff_vector["z"] != 0:
signed_y = "+y" if diff_vector["y"] > 0 else "-y"
signed_z = "+z" if diff_vector["z"] > 0 else "-z"
self.region_id = direction_to_id[
stringify((diff_vector_to_direction[signed_y], diff_vector_to_direction[signed_z]))
]
elif diff_vector["x"] != 0 and diff_vector["y"] == 0 and diff_vector["z"] != 0:
signed_z = "+z" if diff_vector["z"] > 0 else "-z"
signed_x = "+x" if diff_vector["x"] > 0 else "-x"
self.region_id = direction_to_id[
stringify((diff_vector_to_direction[signed_z], diff_vector_to_direction[signed_x]))
]
# when adjacent octants
if diff_vector["x"] != 0 and diff_vector["y"] != 0 and diff_vector["z"] != 0:
signed_x = "+x" if diff_vector["x"] > 0 else "-x"
signed_y = "+y" if diff_vector["y"] > 0 else "-y"
signed_z = "+z" if diff_vector["z"] > 0 else "-z"
self.region_id = direction_to_id[
stringify((diff_vector_to_direction[signed_x], diff_vector_to_direction[signed_y], diff_vector_to_direction[signed_z]))
]
return self.region_id
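# Illustrative example: with a discretized yaw of 0, a region one cell away in +x maps to
# "left" and one cell away in -z maps to "back"; offsets of more than one cell along a
# single axis map to the corresponding "_row"/"_column" ids instead.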
# obtain mapping of relative directions to ID
# for last action cell itself
null = "null"
# for adjacent cells
lr = ["left", "right"]
td = ["top", "down"]
fb = ["front", "back"]
# for adjacent rows/columns
lr_rows = list(map(lambda x: x + "_row", lr))
td_columns = list(map(lambda x: x + "_column", td))
fb_rows = list(map(lambda x: x + "_row", fb))
# for adjacent quadrants
def stringify(directions):
"""
Converts a collection of directions into a unique identifier string -- irrespective of how the directions are ordered in the iterable
NOTE: DO NOT CHANGE THIS LOGIC WITHOUT THOUGHT
"""
return "_".join(sorted(list(directions)))
lr_td = list(map(stringify, list(itertools.product(lr, td))))
td_fb = list(map(stringify, list(itertools.product(td, fb))))
fb_lr = list(map(stringify, list(itertools.product(fb, lr))))
# for adjacent octants
lr_td_fb = list(map(stringify, list(itertools.product(lr, td, fb))))
# unify all and get a map
all_directions = [null] + lr + td + fb + lr_rows + td_columns + fb_rows + lr_td + td_fb + fb_lr + lr_td_fb # NOTE: DO NOT CHANGE THIS ORDERING WITHOUT THOUGHT!
direction_to_id = {k: v for v, k in enumerate(all_directions)}
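# Note: stringify sorts before joining, so stringify(("top", "left")) and
# stringify(("left", "top")) both yield "left_top" and map to the same id; with
# 1 null + 6 cells + 6 rows/columns + 12 quadrants + 8 octants, direction_to_id
# contains 33 ids in total.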
def discretize_yaw(yaw):
"""
Discretize a yaw angle into the 4 canonical yaw angles/directions
"""
# normalize to [0, 360]
if yaw < 0:
yaw_normalized = 360 + yaw
else:
yaw_normalized = yaw
# discretize
if (yaw_normalized >= 270 + 45 and yaw_normalized <= 360) or (yaw_normalized >= 0 and yaw_normalized < 0 + 45):
return 0
elif yaw_normalized >= 0 + 45 and yaw_normalized < 90 + 45:
return 90
elif yaw_normalized >= 90 + 45 and yaw_normalized < 180 + 45:
return 180
else:
return -90
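# Illustrative examples: discretize_yaw(-30) -> 0 (normalized to 330),
# discretize_yaw(100) -> 90, discretize_yaw(200) -> 180, discretize_yaw(260) -> -90.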
def get_block_counters_spatial_info(diffs_built_config_space, built_config, last_action, builder_position, window_size, extra_check):
"""
Obtain block-counter-based spatial information for the regions around the last action
"""
# degenerate case
if not last_action:
last_action = {
"x": 0,
"y": 1,
"z": 0
}
# obtain regions adjacent to last action
adjacent_regions = get_adjacent_regions(last_action, window_size)
# get counters for each region
list(
map(
lambda x: x.set_block_counters(diffs_built_config_space, built_config, extra_check), # mutating
adjacent_regions
)
)
# obtain canonical ordering of regions -- based on directions
adjacent_regions = sorted(adjacent_regions, key = lambda x: x.get_region_id(builder_position, last_action)) # mutating
return adjacent_regions
def get_adjacent_regions(action, window_size):
"""
Returns the last-action cell plus its adjacent cells, rows/columns, quadrants and octants (which groups are included depends on window_size)
"""
assert window_size >= 1, "Spatial info window size < 1 is not supported."
action_cell = Region(x_min = action["x"], x_max = action["x"], y_min = action["y"], y_max = action["y"], z_min = action["z"], z_max = action["z"])
if window_size >= 1:
cells = [
Region(x_min = action["x"] + 1, x_max = action["x"] + 1, y_min = action["y"], y_max = action["y"], z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"] - 1, x_max = action["x"] - 1, y_min = action["y"], y_max = action["y"], z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"] + 1, y_max = action["y"] + 1, z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"] - 1, y_max = action["y"] - 1, z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"], y_max = action["y"], z_min = action["z"] + 1, z_max = action["z"] + 1),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"], y_max = action["y"], z_min = action["z"] - 1, z_max = action["z"] - 1)
]
quadrants = []
for sign_x in [1, -1]:
for sign_y in [1, -1]:
quadrants.append(
Region(
x_min = action["x"] + 1 if sign_x == 1 else action["x"] - window_size,
x_max = action["x"] + window_size if sign_x == 1 else action["x"] - 1,
y_min = action["y"] + 1 if sign_y == 1 else action["y"] - window_size,
y_max = action["y"] + window_size if sign_y == 1 else action["y"] - 1,
z_min = action["z"],
z_max = action["z"]
)
)
for sign_y in [1, -1]:
for sign_z in [1, -1]:
quadrants.append(
Region(
x_min = action["x"],
x_max = action["x"],
y_min = action["y"] + 1 if sign_y == 1 else action["y"] - window_size,
y_max = action["y"] + window_size if sign_y == 1 else action["y"] - 1,
z_min = action["z"] + 1 if sign_z == 1 else action["z"] - window_size,
z_max = action["z"] + window_size if sign_z == 1 else action["z"] - 1
)
)
for sign_z in [1, -1]:
for sign_x in [1, -1]:
quadrants.append(
Region(
x_min = action["x"] + 1 if sign_x == 1 else action["x"] - window_size,
x_max = action["x"] + window_size if sign_x == 1 else action["x"] - 1,
y_min = action["y"],
y_max = action["y"],
z_min = action["z"] + 1 if sign_z == 1 else action["z"] - window_size,
z_max = action["z"] + window_size if sign_z == 1 else action["z"] - 1
)
)
octants = []
for sign_x in [1, -1]:
for sign_y in [1, -1]:
for sign_z in [1, -1]:
octants.append(
Region(
x_min = action["x"] + 1 if sign_x == 1 else action["x"] - window_size,
x_max = action["x"] + window_size if sign_x == 1 else action["x"] - 1,
y_min = action["y"] + 1 if sign_y == 1 else action["y"] - window_size,
y_max = action["y"] + window_size if sign_y == 1 else action["y"] - 1,
z_min = action["z"] + 1 if sign_z == 1 else action["z"] - window_size,
z_max = action["z"] + window_size if sign_z == 1 else action["z"] - 1
)
)
else:
cells = []
quadrants = []
octants = []
if window_size >= 2:
rows_and_columns = [
Region(x_min = action["x"] + 2, x_max = action["x"] + window_size, y_min = action["y"], y_max = action["y"], z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"] - window_size, x_max = action["x"] - 2, y_min = action["y"], y_max = action["y"], z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"] + 2, y_max = action["y"] + window_size, z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"] - window_size, y_max = action["y"] - 2, z_min = action["z"], z_max = action["z"]),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"], y_max = action["y"], z_min = action["z"] + 2, z_max = action["z"] + window_size),
Region(x_min = action["x"], x_max = action["x"], y_min = action["y"], y_max = action["y"], z_min = action["z"] - window_size, z_max = action["z"] - 2)
]
else:
rows_and_columns = []
all_regions = [action_cell] + cells + rows_and_columns + quadrants + octants
return all_regions
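# Note: for window_size == 1 this yields 1 + 6 + 12 + 8 = 27 regions (no rows/columns),
# and for window_size >= 2 it yields 1 + 6 + 6 + 12 + 8 = 33 regions -- one per id in
# direction_to_id.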
def get_block_counters(diffs_built_config_space, built_config, built_config_in_region, extra_check):
"""
Compute block counters for placements, next placements, removals and existing blocks in a region
Args:
built_config: Full built config -- used for feasibility checks for computing next placements
built_config_in_region: Built config in the specific region -- used for computing existing blocks counter
"""
counters_per_diff = list(map(lambda x: region_to_counters(x, built_config, built_config_in_region, extra_check).__dict__, diffs_built_config_space))
results = BlockCounters(None, None, None, None) # stores final result to return
for field in ["all_placements_counter", "all_next_placements_counter", "all_removals_counter", "all_existing_blocks_counter"]:
# obtain counters per diff per actions type
counters_per_field = list(map(lambda x: x[field], counters_per_diff))
# aggregate all and take expectations
expectation_counter = sum(counters_per_field, Counter())
for key in expectation_counter:
expectation_counter[key] /= len(counters_per_field)
# populate result obj
if field == "all_placements_counter":
results.all_placements_counter = expectation_counter
elif field == "all_next_placements_counter":
results.all_next_placements_counter = expectation_counter
elif field == "all_removals_counter":
results.all_removals_counter = expectation_counter
elif field == "all_existing_blocks_counter":
results.all_existing_blocks_counter = expectation_counter
# reformat result obj
def reformat(counter):
all_colors_counter = [[]]
for color in sorted(type2id.keys()):
all_colors_counter[0].append(float(counter[color]))
return all_colors_counter
results.all_placements_counter = reformat(results.all_placements_counter)
results.all_next_placements_counter = reformat(results.all_next_placements_counter)
results.all_removals_counter = reformat(results.all_removals_counter)
results.all_existing_blocks_counter = reformat(results.all_existing_blocks_counter)
return results
def region_to_counters(a_diff, built_config, built_config_in_region, extra_check):
"""
Compute block counters for placements, next placements and removals for an optimal alignment
"""
def f(actions_list):
actions_list_colors = list(map(lambda x: x["type"], actions_list))
return Counter(actions_list_colors)
# obtain all actions
all_placements = a_diff["gold_minus_built"]
all_next_placements = list(filter(lambda x: is_feasible_next_placement(x, built_config, extra_check), all_placements))
all_removals = a_diff["built_minus_gold"]
# map each set of actions to counters
counts_all_placements = f(all_placements)
counts_all_next_placements = f(all_next_placements)
counts_all_removals = f(all_removals)
# do same for existing blocks in region
counts_all_existing_blocks = f(built_config_in_region)
return BlockCounters(counts_all_placements, counts_all_next_placements, counts_all_removals, counts_all_existing_blocks)
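# Illustrative sketch (hypothetical block types): if a region diff contains two "red"
# placements and one "blue" placement, counts_all_placements is
# Counter({"red": 2, "blue": 1}); next placements are the subset of those placements
# that pass the feasibility check against the full built config.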
class BlockCounters:
"""
Stores block counters for all action types
"""
def __init__(self, all_placements_counter, all_next_placements_counter, all_removals_counter, all_existing_blocks_counter):
self.all_placements_counter = all_placements_counter
self.all_next_placements_counter = all_next_placements_counter
self.all_removals_counter = all_removals_counter
self.all_existing_blocks_counter = all_existing_blocks_counter
def reformat_type_distributions(type_distributions_built_config_space):
"""
Args:
type_distributions_built_config_space: Type distributions in built config space in the raw format
Returns:
a 4-d numpy array representation of the same with dimensions in the order type, x, y, z
"""
type_distributions_arr_built_config_space = np.zeros((len(type2id)+1, x_range, y_range, z_range))
for elem in type_distributions_built_config_space:
x = elem.grid_location["x"] - x_min
y = elem.grid_location["y"] - y_min
z = elem.grid_location["z"] - z_min
for type in elem.type_distribution:
type_id = len(type2id) if type == "empty" else type2id[type]
probability = elem.type_distribution[type]
type_distributions_arr_built_config_space[type_id][x][y][z] = probability
return type_distributions_arr_built_config_space
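# Note: the extra channel at index len(type2id) holds the probability mass assigned to
# "empty" cells, so the first dimension has one slot per block type plus one for empty.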
def remove_empty_states(observations):
observations["WorldStates"] = list(filter(lambda x: x["BuilderPosition"] != None, observations["WorldStates"]))
return observations
def reorder(observations):
"""
Returns the observations dict by reordering blocks temporally in every state
"""
for i, state in enumerate(observations["WorldStates"]):
prev_blocks = [] if i == 0 else observations["WorldStates"][i-1]["BlocksInGrid"]
# pp.PrettyPrinter(indent=4).pprint(state)
curr_blocks = state["BlocksInGrid"]
curr_blocks_reordered = reorder_blocks(curr_blocks, prev_blocks) # obtain temporal ordering of blocks
observations["WorldStates"][i]["BlocksInGrid"] = curr_blocks_reordered # mutate - will be used in next iteration
return observations
def reorder_blocks(curr_blocks, prev_blocks):
"""
Returns a sorted version of the list of current blocks based on their order in the list of previous blocks.
The assumption is that previous blocks are already sorted temporally.
So this preserves that order for those blocks and puts any newly placed ones at the very end.
"""
return sorted(curr_blocks, key = lambda x: index(x, prev_blocks))
def index(curr_block, prev_blocks):
"""
Returns position of current block in the list of previous blocks.
If not found in the list, returns a very large number (meaning it's a newly placed block and should be placed at the end when sorting temporally).
"""
for i, prev_block in enumerate(prev_blocks):
if are_equal(curr_block, prev_block):
return i
return 999
def are_equal(block_1, block_2):
"""
Returns whether two blocks are equal, ignoring the ever-changing perspective coordinates
"""
return reformat(block_1) == reformat(block_2)
def get_last_action(curr_blocks, prev_blocks):
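"""
Return the first block by which curr_blocks and prev_blocks differ -- i.e. the most
recent placement or removal -- or None if the two states are identical.
"""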
curr_blocks = list(map(reformat, curr_blocks))
prev_blocks = list(map(reformat, prev_blocks))
diff_dict = diff(gold_config = curr_blocks, built_config = prev_blocks)
all_actions = diff_dict["gold_minus_built"] + diff_dict["built_minus_gold"]
return all_actions[0] if all_actions else None
def get_gold_actions(world_states):
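"""
Walk the world states in reverse and, for each state, accumulate the placements and
removals still required to reach the final state; removals are only kept for blocks
that actually exist in the current state.
"""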
gold_placements, gold_removals = [], []
next_world_state = None
for i, world_state in reversed(list(enumerate(world_states))):
# print(i, world_state["BlocksInGrid"])
if not next_world_state:
gold_placements.append([])
gold_removals.append([])
else:
next_blocks = list(map(reformat, next_world_state['BlocksInGrid']))
curr_blocks = list(map(reformat, world_state['BlocksInGrid']))
diff_dict = diff(gold_config=next_blocks, built_config=curr_blocks)
diff_dict['gold_minus_built'].extend(gold_placements[0])
diff_dict['built_minus_gold'].extend(gold_removals[0])
curr_blocks = set(map(dict_to_tuple, curr_blocks))
removed_blocks = list(map(dict_to_tuple, diff_dict['built_minus_gold']))
removed_existing = []
for i2 in range(len(diff_dict['built_minus_gold'])):
if removed_blocks[i2] in curr_blocks:
removed_existing.append(diff_dict['built_minus_gold'][i2])
gold_placements.insert(0, diff_dict['gold_minus_built'])
gold_removals.insert(0, removed_existing)
next_world_state = world_state
return gold_placements, gold_removals
def format_prev_utterances(prev_utterances, encoder_vocab):
"""Debugging helper: decode token ids back to words using the encoder vocabulary."""
for token in prev_utterances:
print(encoder_vocab.idx2word[token], end=' ')
print('\n')
if __name__ == '__main__':
"""
Use this section to generate datasets and for debugging purposes.
BE CAREFUL TO NOT OVERWRITE EXISTING DATASETS AS DATASETS ARE NOT VERSION CONTROLLED.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='cnn_3d', help='model type') # seq2seq_all_inputs
parser.add_argument('--split', default='train', help='dataset split')
parser.add_argument('--dump_dataset', default=True, action='store_true', help='build the dataset')
parser.add_argument('--lower', default=False, action='store_true', help='lowercase the dataset')
parser.add_argument('--add_builder_utterances', default=False, action='store_true', help='add builder utterances')
parser.add_argument('--add_augmented_data', default=False, action='store_true', help='add dialog-level augmented dataset')
parser.add_argument('--ignore_diff', default=False, action='store_true', help='skip computing diff')
parser.add_argument('--ignore_perspective', default=False, action='store_true', help='skip computing perspective coordinates')
parser.add_argument('--load_dataset', default=False, action='store_true', help='load a dataset')
parser.add_argument('--augmented_data_fraction', type=float, default=0.0, help='fraction of augmented data to use')
parser.add_argument('--saved_dataset_dir', default="../data/saved_cwc_datasets/lower-allinputs/", help='location of saved dataset')
parser.add_argument('--num_prev_utterances', type=int, default=5, help='number of previous utterances to use as input')
parser.add_argument('--blocks_max_weight', type=int, default=5, help='max weight of temporally weighted blocks')
parser.add_argument('--use_builder_actions', default=False, action='store_true', help='include builder action tokens in the dialogue history')
parser.add_argument('--feasible_next_placements', default=False, action='store_true', help='whether or not to select from pool of feasible next placements only')
parser.add_argument('--num_next_actions', type=int, default=2, help='number of next actions needed')
parser.add_argument('--use_condensed_action_repr', default=False, action='store_true', help='use condensed action representation instead of one-hot')
parser.add_argument('--action_type_sensitive', default=False, action='store_true', help='use action-type-sensitive representations for blocks')
parser.add_argument('--spatial_info_window_size', type=int, default=1000, help='3d window size to extract spatial information from')
parser.add_argument('--use_existing_blocks_counter', default=False, action='store_true', help='include counters for existing blocks')
parser.add_argument('--counters_extra_feasibility_check', default=False, action='store_true', help='whether or not to make the extra check for conflicting blocks')
parser.add_argument('--encoder_vocab', default=None, help='encoder vocab')
parser.add_argument('--decoder_vocab', default=None, help='decoder vocab')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
args = parser.parse_args()
initialize_rngs(args.seed, torch.cuda.is_available())
if args.use_builder_actions and 'builder_actions' not in args.encoder_vocab:
print("Error: you specified to use builder action tokens in the dialogue history, but they do not exist in the encoder's vocabulary.")
sys.exit(0)
dataset = CwCDataset(
model=args.model, split=args.split, lower=args.lower, add_builder_utterances=args.add_builder_utterances, compute_diff=not args.ignore_diff, compute_perspective=not args.ignore_perspective,
encoder_vocab=None, decoder_vocab=None, dump_dataset=args.dump_dataset, load_dataset=args.load_dataset,
saved_dataset_dir=args.saved_dataset_dir, transform=None, sample_filters = [], add_augmented_data=args.add_augmented_data, augmented_data_fraction=args.augmented_data_fraction
)
dataset.set_args(num_prev_utterances=args.num_prev_utterances, blocks_max_weight=args.blocks_max_weight, use_builder_actions=args.use_builder_actions, num_next_actions=args.num_next_actions, use_condensed_action_repr=args.use_condensed_action_repr, action_type_sensitive=args.action_type_sensitive, feasible_next_placements=args.feasible_next_placements, spatial_info_window_size=args.spatial_info_window_size, counters_extra_feasibility_check=args.counters_extra_feasibility_check, use_existing_blocks_counter=args.use_existing_blocks_counter)
dl = dataset.get_data_loader(shuffle=False)
for i in range(10):
print('json id:', dataset.get_sample(i)['json_id'])
print('sample id:', dataset.get_sample(i)['sample_id'])
js = dataset.jsons[dataset.get_sample(i)['json_id']]
print(js['gold_config_name'])
print(js['WorldStates'][dataset.get_sample(i)['sample_id']])
for i, (encoder_inputs, decoder_inputs, decoder_outputs, raw_inputs) in enumerate(dl):
print(raw_inputs.json_id, raw_inputs.sample_id)
if i == 10:
sys.exit(0)
# pass
| 44.900287
| 545
| 0.741976
|
541f7f05c234d3bdfad58258582dc0d3d0f6a3fc
| 2,936
|
py
|
Python
|
bin/tetris.py
|
ponte-vecchio/pydisc-code-jam
|
697dc0e5bf020a0e9d5011d7f94990b0636a55b1
|
[
"MIT"
] | 1
|
2021-11-07T05:05:09.000Z
|
2021-11-07T05:05:09.000Z
|
bin/tetris.py
|
ponte-vecchio/pydisc-code-jam
|
697dc0e5bf020a0e9d5011d7f94990b0636a55b1
|
[
"MIT"
] | 8
|
2021-07-11T10:34:53.000Z
|
2021-07-17T05:38:09.000Z
|
bin/tetris.py
|
ponte-vecchio/pydisc-code-jam
|
697dc0e5bf020a0e9d5011d7f94990b0636a55b1
|
[
"MIT"
] | 3
|
2021-07-10T04:40:47.000Z
|
2021-08-30T19:54:56.000Z
|
import curses
import locale
import sys
from typing import TYPE_CHECKING, Any, Callable, Dict
from play_sounds import play_file as playsound
from play_sounds import play_while_running
from tetris.core import Game
from tetris.exceptions import CollisionError, OutOfBoundsError
from tetris.user_interface import UserInterface, create_screens, make_color_pairs
from tetris.utils import Window
if TYPE_CHECKING:
from tetris.core import Tetromino
else:
Tetromino = Any
sound_path = "bin/utils/sound/sfx_tetris_"
sfx_ingame_path = sound_path + "theme.wav"
KeyBindings = Dict[int, Callable[[Tetromino], None]]
KEY_BINDINGS: KeyBindings = {
curses.KEY_LEFT: lambda tetromino: tetromino.move_sideways("left"),
curses.KEY_RIGHT: lambda tetromino: tetromino.move_sideways("right"),
curses.KEY_DOWN: lambda tetromino: tetromino.move_down(),
ord("s"): lambda tetromino: tetromino.move_all_the_way_down(),
ord("a"): lambda tetromino: tetromino.rotate("left"),
ord("d"): lambda tetromino: tetromino.rotate("right"),
}
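# A minimal sketch of extending the bindings (assuming Tetromino keeps the methods used
# above): e.g. KEY_BINDINGS[ord("w")] = lambda tetromino: tetromino.rotate("right")
# would register an alternative rotation key.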
def start_new_game(curse_context: Window) -> None:
"""Prompt to start a new game"""
with play_while_running(sfx_ingame_path):
main(curse_context)
def main(stdscr: Window) -> None:
"""Main function called from outside with all attributes"""
locale.setlocale(locale.LC_ALL, "")
stdscr.nodelay(True)
curses.curs_set(False)
border_screen, inner_screen = create_screens(stdscr)
assert border_screen is not None, "minimum screen size required"
assert inner_screen is not None, "minimum screen size required"
make_color_pairs()
inner_screen.timeout(100)
inner_screen.keypad(True)
user_interface = UserInterface(stdscr, inner_screen)
game = Game(inner_screen, user_interface)
while True:
for screen in (inner_screen, border_screen, stdscr):
screen.erase()
border_screen.box(0, 0)
user_interface.render_landed_tetrominos(game.grid)
user_interface.render_current_tetromino(game.tetromino)
user_interface.render_next_tetromino(game.next_tetromino)
user_interface.render_instructions()
user_interface.render_score(game.score)
stdscr.refresh()
inner_screen.refresh()
if not game.paused:
game.handle_falling()
game.clear_rows()
try:
user_input = inner_screen.getch()
except curses.error:
continue
except KeyboardInterrupt:
sys.exit()
if user_input == ord("p"):
game.pause()
elif user_input == ord("q"):
return
elif not game.paused and user_input in KEY_BINDINGS:
try:
KEY_BINDINGS[user_input](game.tetromino)
except (CollisionError, OutOfBoundsError):
continue
if __name__ == "__main__":
curses.wrapper(start_new_game)
curses.endwin()
| 29.36
| 81
| 0.695163
|
729a75d65aa3add25a9c332c4f14cc46f65d7def
| 712
|
py
|
Python
|
Gram/migrations/0002_auto_20181009_1203.py
|
Drongo-1/new-ip2
|
51a9fa5d5d587fe5b3a038156726a45b312acbad
|
[
"MIT"
] | 1
|
2021-08-16T06:02:38.000Z
|
2021-08-16T06:02:38.000Z
|
Gram/migrations/0002_auto_20181009_1203.py
|
danalvin/Django-IP2
|
3c0a6e4d2de9d4e027b11f2873ed69aba4509fc7
|
[
"MIT"
] | null | null | null |
Gram/migrations/0002_auto_20181009_1203.py
|
danalvin/Django-IP2
|
3c0a6e4d2de9d4e027b11f2873ed69aba4509fc7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-09 12:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Gram', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'ordering': ['-post_date']},
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
| 26.37037
| 137
| 0.634831
|
f766678adcbb38de107f7ff94b470f5a6c23eb58
| 604
|
py
|
Python
|
S1/TP5/ex9.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/TP5/ex9.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/TP5/ex9.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
from random import randint
ls = [randint(-5, 5) for i in range(3)]
print(ls)
if ls[0] > ls[1]:
ls[0], ls[1] = ls[1], ls[0]
if ls[1] > ls[2]:
ls[1], ls[2] = ls[2], ls[1]
if ls[0] > ls[1]:
ls[0], ls[1] = ls[1], ls[0]
print(ls)
# the preceding code sorts a list of three elements
def bubbleSort(lis):
for i in range(len(lis) - 1):
for j in range(0, len(lis) - i - 1):
if lis[j] > lis[j + 1]:
lis[j], lis[j + 1] = lis[j + 1], lis[j]
return lis
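# Bubble sort repeatedly compares adjacent elements and swaps them when out of order;
# after pass i the largest i elements are in their final positions, giving O(n^2)
# comparisons in the worst case.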
randomList = [randint(-10, 10) for i in range(10)]
print(randomList)
print(bubbleSort(randomList))
| 20.827586
| 55
| 0.543046
|
dfeb749a1bfa898f442d08e5e4e9c3f8e1f767cf
| 641
|
py
|
Python
|
students/K33401/laboratory_works/Egorov_Michil/laboratory_work_1/3_math_operations/client.py
|
EgorovM/ITMO_ICT_WebDevelopment_2021-2022
|
35c41ba024d7a3cd89654bd4db23f7d447e0f0a2
|
[
"MIT"
] | null | null | null |
students/K33401/laboratory_works/Egorov_Michil/laboratory_work_1/3_math_operations/client.py
|
EgorovM/ITMO_ICT_WebDevelopment_2021-2022
|
35c41ba024d7a3cd89654bd4db23f7d447e0f0a2
|
[
"MIT"
] | null | null | null |
students/K33401/laboratory_works/Egorov_Michil/laboratory_work_1/3_math_operations/client.py
|
EgorovM/ITMO_ICT_WebDevelopment_2021-2022
|
35c41ba024d7a3cd89654bd4db23f7d447e0f0a2
|
[
"MIT"
] | null | null | null |
import socket
TASKS = [
'pythagorean 3 4',
'quadratic_equation 1 2 1',
'trapezoid_area 4 5 3',
'parallelogram_area 4 2',
'-1 2 4 asdasd',
'pythagorean ',
'quadratic_equation 0 2 1',
'trapezoid_area -4 5 0',
'trapezoid_area 4 asdg',
'trapezoid_area 4',
'parallelogram_area 4 0',
]
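# Note: the first four tasks are well-formed requests; the remaining entries are
# deliberately malformed or degenerate (unknown operation, missing or non-numeric
# arguments, zero coefficients) and presumably exercise the server's error handling.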
if __name__ == "__main__":
for task in TASKS:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 8000))
conn.send(task.encode('utf-8'))
server_answer = conn.recv(16348).decode('utf-8')
print(task, ':', server_answer)
conn.close()
| 25.64
| 64
| 0.605304
|
85b42b86c120a2df19f8670a29e3427b8d0ce992
| 665
|
py
|
Python
|
tools/XstreamDL_CLI/extractors/hls/ext/xprogram_date_time.py
|
wayneclub/Subtitle-Downloader
|
4ab1e7ab075593d4245867996b0766efc5aa418a
|
[
"MIT"
] | 57
|
2021-12-05T02:31:51.000Z
|
2022-03-31T03:36:26.000Z
|
tools/XstreamDL_CLI/extractors/hls/ext/xprogram_date_time.py
|
wayneclub/Subtitle-Downloader
|
4ab1e7ab075593d4245867996b0766efc5aa418a
|
[
"MIT"
] | 25
|
2021-12-08T09:16:30.000Z
|
2022-03-31T11:09:10.000Z
|
tools/XstreamDL_CLI/extractors/hls/ext/xprogram_date_time.py
|
wayneclub/Subtitle-Downloader
|
4ab1e7ab075593d4245867996b0766efc5aa418a
|
[
"MIT"
] | 17
|
2021-12-01T03:11:41.000Z
|
2022-03-26T15:22:35.000Z
|
from datetime import datetime
from .x import X
class XProgramDateTime(X):
'''
#EXT-X-PROGRAM-DATE-TIME -- absolute date and time of the first segment
- 2019-01-01T00:00:00.000Z
'''
def __init__(self):
super(XProgramDateTime, self).__init__('#EXT-X-PROGRAM-DATE-TIME')
self.program_date_time = None # type: datetime
def set_attrs_from_line(self, line: str):
'''
Override the parent class method of the same name
'''
line = self.get_tag_info(line)
if line.endswith('Z') is True:
line = f'{line[:-1]}+00:00'
self.program_date_time = datetime.fromisoformat(line)
return self
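# Illustrative example (assuming get_tag_info strips the leading tag): a line such as
# "#EXT-X-PROGRAM-DATE-TIME:2019-01-01T00:00:00.000Z" becomes
# "2019-01-01T00:00:00.000+00:00", which datetime.fromisoformat parses directly.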
| 26.6
| 74
| 0.58797
|
06f0aa33d5c8b3f7f88f4324bc4dd3c885cc200f
| 8,335
|
py
|
Python
|
tensorflow/python/keras/engine/deferred_sequential_test.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/keras/engine/deferred_sequential_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/keras/engine/deferred_sequential_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 28
|
2020-02-10T07:03:06.000Z
|
2022-01-12T11:19:20.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to deferred-build `Sequential` models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestDeferredSequential(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_build_behavior(self):
# Test graph network creation after __call__
model = get_model()
model(np.random.random((2, 6)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [2, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [2, 2])
# Test effect of new __call__ with a different shape
model(np.random.random((3, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
model(np.random.random((4, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after build
model = get_model()
model.build((None, 6))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after compile/fit
model = get_model()
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.zeros((2, 6)), np.zeros((2, 2)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
# Inconsistency here: with eager `fit`, the model is built with shape
# (2, 6), but with graph function `fit`, it is built with shape `(None, 6)`.
# This is likely due to our assumption "the batch size should be dynamic"
# at the level of `Model`. TODO(fchollet): investigate and resolve.
self.assertEqual(model.inputs[0].shape.as_list()[-1], 6)
self.assertEqual(model.outputs[0].shape.as_list()[-1], 2)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_add_and_pop(self):
model = get_model()
model.build((None, 6))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
model.pop()
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 2)
self.assertLen(model.weights, 2)
model.add(keras.layers.Dense(2))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_feature_extraction(self):
# This tests layer connectivity reset when rebuilding
model = get_model()
model(np.random.random((3, 6))) # First build
model(np.random.random((4, 6))) # Triggers a rebuild
# Classic feature extractor pattern
extractor = keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# Check that inputs and outputs are connected
_ = extractor(np.random.random((4, 6)))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_saving_savedmodel(self):
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), 'model_path')
model.save(path)
new_model = keras.models.load_model(path)
for layer1, layer2 in zip(model._layers, new_model._layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@unittest.skipIf(h5py is None, 'Test requires h5py')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_saving_h5(self):
path = os.path.join(self.get_temp_dir(), 'model_path.h5')
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), 'model_path.h5')
model.save(path)
new_model = keras.models.load_model(path)
for layer1, layer2 in zip(model._layers, new_model._layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@keras_parameterized.run_all_keras_modes
def test_shared_layer(self):
# This tests that preexisting layer connectivity is preserved
# when auto-building graph networks
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m1(np.random.random((3, 6)))
m2 = keras.Sequential([shared_layer])
m2(np.random.random((3, 6)))
# Nesting case
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m2 = keras.Sequential([shared_layer, m1])
m2(np.random.random((3, 2)))
@keras_parameterized.run_all_keras_modes
def test_loss_layer(self):
class LossLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
# Test loss layer alone
model = keras.Sequential([LossLayer()])
model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertAllClose(loss, 2.)
# Test loss layer combined with another layer
model = keras.Sequential([
keras.layers.Dense(1, kernel_initializer='ones'),
LossLayer()])
model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertLess(loss, 2.)
# Test loss layer combined with external loss
model = keras.Sequential([
keras.layers.Dense(1, kernel_initializer='ones'),
LossLayer()])
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)), np.ones((2, 2)))
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)), np.ones((1, 2)))
def get_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, name='first_layer'))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, name='last_layer'))
return model
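# Note: the layers above are created without an input shape, so the Sequential model
# stays deferred (unbuilt) until it is first called, built explicitly, or fit -- which
# is exactly what the tests in this file exercise.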
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| 38.410138
| 80
| 0.69958
|
44af7162175bffc9b365d2a750baf9fd66242a80
| 1,860
|
py
|
Python
|
openstack/tests/unit/identity/v3/test_endpoint.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | 43
|
2018-12-19T08:39:15.000Z
|
2021-07-21T02:45:43.000Z
|
openstack/tests/unit/identity/v3/test_endpoint.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | 11
|
2019-03-17T13:28:56.000Z
|
2020-09-23T23:57:50.000Z
|
openstack/tests/unit/identity/v3/test_endpoint.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | 47
|
2018-12-19T05:14:25.000Z
|
2022-03-19T15:28:30.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.identity.v3 import endpoint
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'enabled': True,
'id': IDENTIFIER,
'interface': '3',
'links': {'self': 'http://example.com/endpoint1'},
'region_id': '4',
'service_id': '5',
'url': '6',
}
class TestEndpoint(testtools.TestCase):
def test_basic(self):
sot = endpoint.Endpoint()
self.assertEqual('endpoint', sot.resource_key)
self.assertEqual('endpoints', sot.resources_key)
self.assertEqual('/endpoints', sot.base_path)
self.assertEqual('identity', sot.service.service_type)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_get)
self.assertTrue(sot.allow_update)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
self.assertTrue(sot.patch_update)
def test_make_it(self):
sot = endpoint.Endpoint(**EXAMPLE)
self.assertTrue(sot.is_enabled)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['interface'], sot.interface)
self.assertEqual(EXAMPLE['links'], sot.links)
self.assertEqual(EXAMPLE['region_id'], sot.region_id)
self.assertEqual(EXAMPLE['service_id'], sot.service_id)
self.assertEqual(EXAMPLE['url'], sot.url)
| 35.09434
| 75
| 0.68871
|
82aff1431b37c5406e48820802f8a341d2a149bf
| 1,636
|
py
|
Python
|
recipes/crow/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 562
|
2019-09-04T12:23:43.000Z
|
2022-03-29T16:41:43.000Z
|
recipes/crow/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 9,799
|
2019-09-04T12:02:11.000Z
|
2022-03-31T23:55:45.000Z
|
recipes/crow/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 1,126
|
2019-09-04T11:57:46.000Z
|
2022-03-31T16:43:38.000Z
|
from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
class CrowConan(ConanFile):
name = "crow"
homepage = "https://github.com/ipkn/crow"
description = "Crow is C++ microframework for web. (inspired by Python Flask)"
topics = ("conan", "web", "microframework", "header-only")
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "arch", "build_type"
exports_sources = ["patches/*"]
license = "BSD-3-Clause"
@property
def _source_subfolder(self):
return "source_subfolder"
def requirements(self):
self.requires("boost/1.69.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "crow-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def build(self):
if tools.Version(self.deps_cpp_info["boost"].version) >= "1.70.0":
raise ConanInvalidConfiguration("Crow requires Boost <1.70.0")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = CMake(self)
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
def package(self):
self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst=os.path.join("include", "crow"), src="amalgamate")
def package_id(self):
self.info.header_only()
def package_info(self):
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.system_libs = ["pthread"]
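# Note: package_id() marks the package as header-only, so one settings-agnostic package
# is produced; package_info() still propagates the system "pthread" library to consumers
# on Linux and FreeBSD.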
| 34.808511
| 82
| 0.643643
|
3aea16818f47ae2a215304a313e749a2c3bc1669
| 16,249
|
py
|
Python
|
example/apps/test_security/tests/output_request_log.py
|
druids/django-security
|
bca889ff1a58378a038a08ca365162d9e3ef3fbf
|
[
"MIT"
] | 9
|
2019-03-12T12:31:20.000Z
|
2021-01-22T13:31:36.000Z
|
example/apps/test_security/tests/output_request_log.py
|
druids/django-security
|
bca889ff1a58378a038a08ca365162d9e3ef3fbf
|
[
"MIT"
] | 28
|
2019-12-05T12:20:49.000Z
|
2022-03-25T08:15:10.000Z
|
example/apps/test_security/tests/output_request_log.py
|
druids/django-security
|
bca889ff1a58378a038a08ca365162d9e3ef3fbf
|
[
"MIT"
] | 5
|
2019-07-10T15:29:44.000Z
|
2021-02-01T12:50:56.000Z
|
import json
import responses
from requests.exceptions import ConnectionError
from django.test import override_settings
from germanium.decorators import data_consumer
from germanium.test_cases.client import ClientTestCase
from germanium.tools import (
assert_equal, assert_raises, assert_not_in, assert_in, assert_equal_model_fields, assert_is_not_none,
assert_length_equal, all_eq_obj
)
from security import requests
from security.backends.signals import (
output_request_started, output_request_finished, output_request_error
)
from security.decorators import log_with_data
from security.enums import RequestLogState
from security.backends.sql.models import OutputRequestLog as SQLOutputRequestLog
from security.backends.elasticsearch.models import OutputRequestLog as ElasticsearchOutputRequestLog
from security.backends.testing import capture_security_logs
from .base import BaseTestCaseMixin, TRUNCATION_CHAR
@override_settings(SECURITY_BACKEND_WRITERS={})
class OutputRequestLogTestCase(BaseTestCaseMixin, ClientTestCase):
@responses.activate
@data_consumer('create_user')
def test_output_request_should_be_logged(self, user):
responses.add(responses.POST, 'https://localhost/test', body='test')
expected_output_request_started_data = {
'request_headers': {
'User-Agent': 'python-requests/2.26.0',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'Connection': 'keep-alive',
'Content-Length': '16'
},
'request_body': '{"test": "test"}',
'method': 'POST',
'host': 'localhost',
'path': '/test',
'queries': {},
'is_secure': True,
'start': all_eq_obj,
}
expected_output_request_finished_data = {
**expected_output_request_started_data,
'stop': all_eq_obj,
'response_code': 200,
'response_headers': {'Content-Type': 'text/plain'},
'response_body': 'test',
}
with capture_security_logs() as logged_data:
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
related_objects=[user]
)
assert_length_equal(logged_data.output_request_started, 1)
assert_length_equal(logged_data.output_request_finished, 1)
assert_length_equal(logged_data.output_request_error, 0)
assert_equal(logged_data.output_request_started[0].data, expected_output_request_started_data)
assert_equal(logged_data.output_request_finished[0].data, expected_output_request_finished_data)
assert_equal(logged_data.output_request_started[0].slug, 'test')
assert_equal(logged_data.output_request_finished[0].related_objects, [user])
@responses.activate
def test_output_request_error_should_be_logged(self):
expected_output_request_started_data = {
'request_headers': {
'User-Agent': 'python-requests/2.26.0',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'Connection': 'keep-alive',
'Content-Length': '16'
},
'request_body': '{"test": "test"}',
'method': 'POST',
'host': 'localhost',
'path': '/test',
'queries': {},
'is_secure': True,
'start': all_eq_obj,
}
expected_output_request_error_data = {
**expected_output_request_started_data,
'stop': all_eq_obj,
'error_message': all_eq_obj
}
with capture_security_logs() as logged_data:
with assert_raises(ConnectionError):
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
)
assert_length_equal(logged_data.output_request_started, 1)
assert_length_equal(logged_data.output_request_finished, 0)
assert_length_equal(logged_data.output_request_error, 1)
assert_equal(logged_data.output_request_started[0].data, expected_output_request_started_data)
assert_equal(logged_data.output_request_error[0].data, expected_output_request_error_data)
@responses.activate
def test_response_sensitive_data_body_in_json_should_be_hidden(self):
responses.add(responses.POST, 'http://localhost', body='test')
with capture_security_logs() as logged_data:
requests.post('http://localhost', data=json.dumps({'password': 'secret-password'}))
assert_in('"password": "[Filtered]"', logged_data.output_request[0].data['request_body'])
assert_not_in('"password": "secret-password"', logged_data.output_request[0].data['request_body'])
assert_in('"password": "secret-password"', responses.calls[0].request.body)
assert_not_in('"password": "[Filtered]"', responses.calls[0].request.body)
@responses.activate
def test_response_sensitive_headers_should_be_hidden(self):
responses.add(responses.POST, 'http://localhost', body='test')
with capture_security_logs() as logged_data:
requests.post('http://localhost', headers={'token': 'secret'})
assert_equal(logged_data.output_request[0].data['request_headers']['token'], '[Filtered]')
assert_equal(responses.calls[0].request.headers['token'], 'secret')
@responses.activate
def test_response_sensitive_params_data_should_be_hidden(self):
responses.add(responses.POST, 'http://localhost', body='test')
with capture_security_logs() as logged_data:
requests.post('http://localhost', params={'token': 'secret'})
assert_equal(logged_data.output_request[0].data['queries']['token'], '[Filtered]')
assert_equal(responses.calls[0].request.url, 'http://localhost/?token=secret')
@responses.activate
def test_response_more_sensitive_params_data_should_be_hidden(self):
responses.add(responses.POST, 'http://localhost', body='test')
with capture_security_logs() as logged_data:
requests.post('http://localhost', params={'token': ['secret', 'secret2']})
assert_equal(logged_data.output_request[0].data['queries']['token'], ['[Filtered]', '[Filtered]'])
assert_equal(responses.calls[0].request.url, 'http://localhost/?token=secret&token=secret2')
@responses.activate
def test_response_sensitive_params_and_url_query_together_data_should_be_logged(self):
responses.add(responses.POST, 'http://localhost', body='test')
with capture_security_logs() as logged_data:
requests.post('http://localhost?a=1&a=2', params={'b': '6', 'a': '3', 'c': ['5']})
assert_equal(logged_data.output_request[0].data['queries'], {'b': '6', 'a': ['1', '2', '3'], 'c': '5'})
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'sql'})
@data_consumer('create_user')
def test_output_request_should_be_logged_in_sql_backend(self, user):
responses.add(responses.POST, 'https://localhost/test', body='test')
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
related_objects=[user]
)
assert_equal(SQLOutputRequestLog.objects.count(), 1)
sql_output_request_log = SQLOutputRequestLog.objects.get()
assert_equal_model_fields(
sql_output_request_log,
request_headers={
'User-Agent': 'python-requests/2.26.0',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'Connection': 'keep-alive',
'Content-Length': '16'
},
request_body='{"test": "test"}',
method='POST',
host='localhost',
path='/test',
queries={},
is_secure=True,
slug='test',
time=(sql_output_request_log.stop - sql_output_request_log.start).total_seconds(),
extra_data={},
error_message=None,
response_code=200,
response_headers={'Content-Type': 'text/plain'},
response_body='test',
state=RequestLogState.INFO,
)
assert_equal([rel_obj.object for rel_obj in sql_output_request_log.related_objects.all()], [user])
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'elasticsearch'})
@data_consumer('create_user')
def test_output_request_should_be_logged_in_elasticsearch_backend(self, user):
responses.add(responses.POST, 'https://localhost/test', body='test')
with capture_security_logs() as logged_data:
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
related_objects=[user]
)
elasticsearch_output_request_log = ElasticsearchOutputRequestLog.get(
id=logged_data.output_request[0].id
)
assert_equal_model_fields(
elasticsearch_output_request_log,
request_headers='{"User-Agent": "python-requests/2.26.0", "Accept-Encoding": "gzip, deflate", '
'"Accept": "*/*", "Connection": "keep-alive", "Content-Length": "16"}',
request_body='{"test": "test"}',
method='POST',
host='localhost',
path='/test',
queries='{}',
is_secure=True,
slug='test',
time=(elasticsearch_output_request_log.stop - elasticsearch_output_request_log.start).total_seconds(),
extra_data={},
error_message=None,
response_code=200,
response_headers='{"Content-Type": "text/plain"}',
response_body='test',
state=RequestLogState.INFO,
)
assert_equal(
[rel_obj for rel_obj in elasticsearch_output_request_log.related_objects],
['default|3|{}'.format(user.id)]
)
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'logging'})
@data_consumer('create_user')
def test_output_request_should_be_logged_in_logging_backend(self, user):
responses.add(responses.POST, 'https://localhost/test', body='test')
with capture_security_logs() as logged_data:
with self.assertLogs('security.output_request', level='INFO') as cm:
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
related_objects=[user]
)
assert_equal(
cm.output,
[
f'INFO:security.output_request:'
f'Output request "{logged_data.output_request[0].id}" '
f'to "localhost" with path "/test" was started',
f'INFO:security.output_request:'
f'Output request "{logged_data.output_request[0].id}" '
f'to "localhost" with path "/test" was successful'
]
)
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'sql'})
    def test_error_output_request_should_be_logged_in_sql_backend(self):
with assert_raises(ConnectionError):
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
)
assert_equal(SQLOutputRequestLog.objects.count(), 1)
sql_output_request_log = SQLOutputRequestLog.objects.get()
assert_equal_model_fields(
sql_output_request_log,
request_headers={
'User-Agent': 'python-requests/2.26.0',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'Connection': 'keep-alive',
'Content-Length': '16'
},
request_body='{"test": "test"}',
method='POST',
host='localhost',
path='/test',
queries={},
is_secure=True,
slug='test',
time=(sql_output_request_log.stop - sql_output_request_log.start).total_seconds(),
extra_data={},
response_code=None,
response_headers=None,
response_body=None,
state=RequestLogState.ERROR,
)
assert_is_not_none(sql_output_request_log.error_message)
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'elasticsearch'})
def test_error_output_request_should_be_logged_in_elasticsearch_backend(self):
with capture_security_logs() as logged_data:
with assert_raises(ConnectionError):
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
)
elasticsearch_input_request_log = ElasticsearchOutputRequestLog.get(
id=logged_data.output_request[0].id
)
assert_equal_model_fields(
elasticsearch_input_request_log,
request_headers='{"User-Agent": "python-requests/2.26.0", "Accept-Encoding": "gzip, deflate", '
'"Accept": "*/*", "Connection": "keep-alive", "Content-Length": "16"}',
request_body='{"test": "test"}',
method='POST',
host='localhost',
path='/test',
queries='{}',
is_secure=True,
slug='test',
time=(elasticsearch_input_request_log.stop - elasticsearch_input_request_log.start).total_seconds(),
extra_data={},
response_code=None,
response_headers=None,
response_body=None,
state=RequestLogState.ERROR,
)
assert_is_not_none(elasticsearch_input_request_log.error_message)
@responses.activate
@override_settings(SECURITY_BACKEND_WRITERS={'logging'})
def test_error_output_request_should_be_logged_in_logging_backend(self):
with capture_security_logs() as logged_data:
with self.assertLogs('security.output_request', level='INFO') as cm:
with assert_raises(ConnectionError):
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
slug='test',
)
assert_equal(
cm.output,
[
f'INFO:security.output_request:'
f'Output request "{logged_data.output_request[0].id}" '
f'to "localhost" with path "/test" was started',
f'ERROR:security.output_request:'
f'Output request "{logged_data.output_request[0].id}" '
f'to "localhost" with path "/test" failed'
]
)
@responses.activate
@data_consumer('create_user')
def test_slug_and_related_data_should_be_send_to_output_request_logger(self, user):
responses.add(responses.POST, 'https://localhost/test', body='test')
with log_with_data(related_objects=[user], slug='TEST'):
with capture_security_logs() as logged_data:
requests.post(
'https://localhost/test',
data=json.dumps({'test': 'test'}),
)
assert_equal(logged_data.output_request[0].related_objects, {user})
assert_equal(logged_data.output_request[0].slug, 'TEST')
| 45.515406
| 118
| 0.591975
|
6c4e7dd01cdef2526291e5ee9852323ab1b5018f
| 183
|
py
|
Python
|
yatube/yatube/urls.py
|
begunko/yatube_project
|
fe974e52fa97fa1084e7317b9366ec86da27f291
|
[
"BSD-3-Clause"
] | null | null | null |
yatube/yatube/urls.py
|
begunko/yatube_project
|
fe974e52fa97fa1084e7317b9366ec86da27f291
|
[
"BSD-3-Clause"
] | null | null | null |
yatube/yatube/urls.py
|
begunko/yatube_project
|
fe974e52fa97fa1084e7317b9366ec86da27f291
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("", include('posts.urls', namespace="posts")),
path('admin/', admin.site.urls),
]
| 22.875
| 55
| 0.688525
|
9e29efaa1466c9716ed219fa9a541d0f9973eb4d
| 5,829
|
py
|
Python
|
plot_univariate.py
|
No-Stream/pandas-snippets
|
f86d3d6844bb181a9ee596cf5d8a61a6edf5a60d
|
[
"MIT"
] | null | null | null |
plot_univariate.py
|
No-Stream/pandas-snippets
|
f86d3d6844bb181a9ee596cf5d8a61a6edf5a60d
|
[
"MIT"
] | null | null | null |
plot_univariate.py
|
No-Stream/pandas-snippets
|
f86d3d6844bb181a9ee596cf5d8a61a6edf5a60d
|
[
"MIT"
] | null | null | null |
from typing import List
from functools import partial
import warnings
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.api.types import is_numeric_dtype
from tqdm import tqdm
def plot_feature(
feat: str,
y_col: str = None, # janky, making kwarg for easier functools.partial...
df: pd.DataFrame = None,
is_binary_outcome: bool = None,
coerce_cat_thresh: float = 0.001,
graph_sample: int = 10_000,
cat_freq_thresh: float = 0.01,
do_scatter: bool = False,
ylabel: str = None,
) -> None:
try:
this_df = df.copy().dropna(subset=[feat, y_col])
is_cat = (this_df[feat].dtype == "object") or (hasattr(this_df[feat], "cat"))
cardinality = this_df[feat].nunique()
rel_cardinality = cardinality / len(this_df)
is_dt = "datetime" in this_df[feat].dtype.name # HACK
is_numeric = is_numeric_dtype(this_df[feat])
is_binary = cardinality == 2
plot_lowess = not is_binary_outcome
graph_as_cat = (
(is_cat or (rel_cardinality < coerce_cat_thresh))
and not is_dt
and not is_numeric
) or is_binary
if graph_as_cat:
freqs = this_df[feat].value_counts(normalize=True)
# Filter on level of var occurred > cat_freq_thresh % of times; sort by freq
freqs = freqs.loc[freqs >= cat_freq_thresh]
levels_to_eval = freqs.index
if not list(levels_to_eval):
return None # very high cardinality, skip
plt.figure(figsize=(12, 8))
sns.catplot(
x=feat,
y=y_col,
data=this_df,
kind="point",
join=False,
order=levels_to_eval,
)
plt.xticks(rotation=90)
plt.title(f"{feat} -> {y_col}?")
if ylabel:
plt.ylabel(ylabel)
plt.show()
# consider dt to be days since minimum TS
elif is_dt and not is_numeric:
min_ts = this_df[feat].min()
days_since_min = (
pd.to_datetime(this_df[feat]) - pd.to_datetime(this_df[feat]).min()
) / pd.to_timedelta("1d")
empirical1pct, empirical99pct = (
days_since_min.quantile(0.01),
days_since_min.quantile(0.99),
)
fil_outliers = (days_since_min >= empirical1pct) & (
days_since_min <= empirical99pct
)
graph_sample = min(graph_sample, len(this_df.loc[fil_outliers]))
sns.regplot(
x=days_since_min.sample(graph_sample, random_state=42),
y=this_df[y_col].sample(graph_sample, random_state=42),
scatter_kws={"alpha": 0.2},
lowess=plot_lowess,
logistic=is_binary_outcome,
scatter=do_scatter,
)
plt.title(f"{feat} (as days since min.) -> {y_col}?")
if ylabel:
plt.ylabel(ylabel)
plt.show()
# numeric feature, use regplot
elif is_numeric:
# confirm it can be cast to float
_ = this_df[feat].astype("float")
empirical1pct, empirical99pct = (
this_df[feat].quantile(0.01),
this_df[feat].quantile(0.99),
)
fil_outliers = (this_df[feat] >= empirical1pct) & (
this_df[feat] <= empirical99pct
)
graph_sample = min(graph_sample, len(this_df.loc[fil_outliers]))
sampled = (
this_df.loc[fil_outliers, [feat, y_col]]
.sample(graph_sample, random_state=42)
.astype("float")
)
sns.regplot(
x=feat,
y=y_col,
data=sampled,
scatter_kws={"alpha": 0.2},
lowess=plot_lowess,
logistic=is_binary_outcome,
scatter=do_scatter,
)
plt.title(f"{feat} -> {y_col}?")
if ylabel:
plt.ylabel(ylabel)
plt.show()
else:
warnings.warn(f"Unhandled column {feat}")
except Exception as err:
warnings.warn(f"Error for feature {feat}.")
warnings.warn(str(err))
        raise err
def plot_univariate(
df: pd.DataFrame,
feats: List[str],
y_col: str,
coerce_cat_thresh: float = 0.001,
graph_sample: int = 10_000,
cat_freq_thresh: float = 0.01,
do_scatter: bool = False,
ylabel: str = None,
) -> None:
"""
Plot a list of features compared to outcome.
df: pd.DataFrame, containing relevant data
feats: list[str], colnames of x features to graph against your outcome
y_col: str, name of your outcome column, assume it's continuous
coerce_cat_thresh: float, will manually consider x cols to be cats if
len(df.col.unique()) < cat_thresh * len(df.col)
E.G. by default if less than 0.1% unique values, consider it to be categorical.
graph_sample: int, how many data points to use for scatter graphs (gets messy with too many)
cat_freq_thresh: float, % of the non-NA values of the column must be x in order to graph it.
i.e. ignore very rare cats.
return: None, will display graphs
"""
is_binary_outcome = df[y_col].nunique() == 2
plot_with_params = partial(
plot_feature,
y_col=y_col,
df=df,
is_binary_outcome=is_binary_outcome,
coerce_cat_thresh=coerce_cat_thresh,
graph_sample=graph_sample,
cat_freq_thresh=cat_freq_thresh,
do_scatter=do_scatter,
ylabel=ylabel,
)
[plot_with_params(feat) for feat in feats]
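# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical call to plot_univariate on a small synthetic frame; the column
# names, sizes and distributions below are made up purely for demonstration.
if __name__ == "__main__":
    import numpy as np

    _rng = np.random.default_rng(0)
    _demo_df = pd.DataFrame({
        "age": _rng.integers(18, 90, size=500),
        "segment": _rng.choice(["a", "b", "c"], size=500),
        "spend": _rng.gamma(2.0, 50.0, size=500),
    })
    # Plots "age" (numeric) and "segment" (categorical) against the outcome "spend".
    plot_univariate(_demo_df, feats=["age", "segment"], y_col="spend")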
| 32.932203
| 96
| 0.569051
|
d1ddfbc21b42e46a6414ffd6abd9fef6e97e1d54
| 2,981
|
py
|
Python
|
test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MultiapiServiceClientConfiguration(Configuration):
"""Configuration for MultiapiServiceClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(MultiapiServiceClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.api_version = "2.0.0"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'multiapi/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
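# --- Illustrative usage note (not part of the generated file) ---
# The configuration is normally constructed by the generated MultiapiServiceClient,
# but it can also be built directly, e.g. (assuming azure-identity is installed):
#   from azure.identity import DefaultAzureCredential
#   config = MultiapiServiceClientConfiguration(DefaultAzureCredential())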
| 45.861538
| 125
| 0.686011
|
e2c8f7c8e41bd00c139b492bf0d373fb0f5530cb
| 6,024
|
py
|
Python
|
my_ml/model/random_forest.py
|
big-c-note/my_ml_from_scratch
|
7da54282f736b163bb82058b037755bf97c9d7b9
|
[
"MIT"
] | 1
|
2020-09-20T21:32:21.000Z
|
2020-09-20T21:32:21.000Z
|
my_ml/model/random_forest.py
|
big-c-note/my_ml_from_scratch
|
7da54282f736b163bb82058b037755bf97c9d7b9
|
[
"MIT"
] | 27
|
2020-09-20T21:18:48.000Z
|
2021-07-31T13:02:10.000Z
|
my_ml/model/random_forest.py
|
big-c-note/my_ml_from_scratch
|
7da54282f736b163bb82058b037755bf97c9d7b9
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Union
from multiprocessing import cpu_count
import logging
import numpy as np
import joblib
from joblib import Parallel, delayed
from tqdm import trange
from my_ml.model.decision_tree import DecisionTree
log = logging.getLogger(__name__)
class RandomForest:
def __init__(self, S: Union[int, float]):
"""
        Base class for the random forest. This class implements the
        forest-building method as well as the predict method.
Parameters
----------
S : int
This is the number of trees for the random forest.
Methods
-------
predict(X: np.ndarray)
Returns predictions for a given feature set. The predictions come
out as a probability matrix that is m x k in dimensionality. That
is, the 0th column is each examples probability of being 0 and the
1st column is the each examples probability of being 1.
"""
self._S: Union[int, float] = S
self._forest: List = []
self._num_features: Optional[int] = None
self._num_training_examples: Optional[int] = None
# This is used for multi-processing. This allows for a speed up of (#
# of cores)X
self._cores: int = cpu_count()
def fit(self, X: np.ndarray, y: np.ndarray):
"""Method for fitting feature X (m x n) to y (m x 1) or (m,)."""
log.info("Creating decision trees.")
if not self._num_features:
self._num_features = X.shape[1]
self._build_forest(X, y)
assert len(self._forest) == self._S
def predict(self, X: np.ndarray, probability: bool = True):
"""
Return matrix of probabilities. 0th column references probability of
being class 0 and the 1st column references the probability of being
class 1.
"""
assert self._forest
assert len(self._forest) == self._S
try:
assert X.shape[0] > 1 and X.shape[1] > 1
except AssertionError:
raise NotImplementedError(
"""
I'm not supporting predictions of single examples. It's easy to
                implement this functionality if you need it.
"""
)
predictions: List = []
# Iterate through the forest and create a prediction for each tree.
for dtree in self._forest:
predictions.append(dtree.predict(X))
# Transposing the matrix for ease. This may not be needed.
predictions: np.ndarray = np.array(predictions).T
# Probabilities for the majority class.
prob_maj: np.ndarray = np.count_nonzero(
predictions == 0, axis=1
) / predictions.shape[1]
# Probabilities for the minority class.
prob_min: np.ndarray = np.count_nonzero(
predictions == 1, axis=1
) / predictions.shape[1]
# Making these column vectors.
prob_maj: np.ndarray = prob_maj.reshape(-1, 1)
prob_min: np.ndarray = prob_min.reshape(-1, 1)
# Stacking these column vectors side by side.
predictions: np.ndarray = np.hstack((prob_maj, prob_min))
# TODO: Need to add noise in the event that there is a tie. With
        # sufficiently large values or odd values of parameter S, this should
# not be a problem. However, all equal probability class predictions
# will resolve to the same prediction (should be random).
return predictions
def _build_forest(self, X: np.ndarray, y: np.ndarray, p: float = 1):
"""Build a forest of decision trees."""
# Making y a column vector.
y: np.ndarray = y.reshape(-1, 1)
# Adding the y outcome vector to the end of the feature vector to create
# T.
T: np.ndarray = np.hstack((X, y))
# Checking dimensions. This will run on T and Tc which will have a
# different number of examples, most likely.
assert self._num_features
assert T.shape[1] == self._num_features + 1
# Get the number of trees from S * p. p will be (1 - p) for T and p for
# Tc.
num_trees: int = int(np.round(self._S * p))
# Checking these sum to S.
assert np.round(self._S * p) + np.round(self._S * (1 - p)) == self._S
# Adding multi-processing for an easy speedup. Adding progress bar for
# viewing how long the job will take.
log.info(
"""
Showing progress on one of two forests to be created. Distributing
compute over all available cores.
"""
)
forest: List = Parallel(n_jobs=self._cores)(
delayed(self._build_forest_helper)(T) for i in trange(num_trees)
)
# Add forest to our list of decision trees.
self._forest += forest
@staticmethod
def _build_forest_helper(T) -> DecisionTree:
# Bagging the T dataset. Essentially I am sampling with replacement and
# creating T_rand which has the same dimensions as T.
T_rand: np.ndarray = T[np.random.randint(T.shape[0], size=T.shape[0]), :]
assert T_rand.shape == T.shape
# Separating X and y.
X_rand: np.ndarray = T_rand[:, :-1]
y_rand: np.ndarray = T_rand[:, -1]
# random_features=True means we will randomly generate a subset of
# features to use in each decision tree. This helps reduce
# variance.
dtree = DecisionTree(random_features=True)
dtree.fit(X_rand, y_rand)
# Add our decision tree to the forest.
return dtree
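# --- Illustrative usage sketch (not part of the original module) ---
# A small smoke test on synthetic data using only the RandomForest defined above;
# the sample sizes, seed and number of trees are arbitrary choices for the demo.
def _demo_random_forest(num_examples=200, num_features=5):
    rng = np.random.RandomState(42)
    X_demo = rng.rand(num_examples, num_features)
    # A simple separable rule so the forest has something to learn.
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(int)
    forest = RandomForest(S=11)
    forest.fit(X_demo, y_demo)
    # Returns a (num_examples x 2) matrix of class probabilities.
    return forest.predict(X_demo)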
if __name__ == "__main__":
cat = joblib.load("tests/data/server.gz")
X_train: np.ndarray = cat["_X_train"]
X_test: np.ndarray = cat["_X_test"]
y_train: np.ndarray = cat["_y_train"]
y_test: np.ndarray = cat["_y_test"]
    # The original snippet referenced an undefined BRAF class; RandomForest
    # (defined above) only takes the number of trees S.
    braf = RandomForest(S=500)
braf.fit(X_train, y_train)
values: np.ndarray = braf.predict(X_test)
import ipdb
ipdb.set_trace()
| 39.631579
| 81
| 0.613878
|
218867a81401f6f52ce634a168677108c390bc49
| 15,800
|
py
|
Python
|
stm32loader.py
|
usc-ee250-spring2021/zwir1
|
3510a903ef8952a8deefe4cfb39464b4dc0806ba
|
[
"MIT"
] | null | null | null |
stm32loader.py
|
usc-ee250-spring2021/zwir1
|
3510a903ef8952a8deefe4cfb39464b4dc0806ba
|
[
"MIT"
] | null | null | null |
stm32loader.py
|
usc-ee250-spring2021/zwir1
|
3510a903ef8952a8deefe4cfb39464b4dc0806ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:si:et:enc=utf-8
# Author: Ivan A-R <ivan@tuxotronic.org>
# Project page: http://tuxotronic.org/wiki/projects/stm32loader
#
# This file is part of stm32loader.
#
# stm32loader is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# stm32loader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with stm32loader; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
import sys, getopt
from functools import reduce  # cmdGetID uses reduce(); needed on Python 3
import serial
import time
import RPi.GPIO as gpio
RST_PIN = 7
#RST_PIN = 12
BSEL_PIN = 11
try:
from progressbar import *
usepbar = 1
except:
usepbar = 0
# Verbose level
QUIET = 20
# these come from AN2606
chip_ids = {
0x412: "STM32 Low-density",
0x410: "STM32 Medium-density",
0x414: "STM32 High-density",
0x420: "STM32 Medium-density value line",
0x428: "STM32 High-density value line",
0x430: "STM32 XL-density",
0x416: "STM32 Medium-density ultralow power line",
0x411: "STM32F2xx",
0x413: "STM32F4xx",
}
def mdebug(level, message):
    if QUIET >= level:
        sys.stderr.write(str(message) + "\n")
class CmdException(Exception):
pass
class CommandInterface:
extended_erase = 0
def open(self, aport='/dev/ttyAMA0', abaudrate=115200) :
self.sp = serial.Serial(
port=aport,
baudrate=abaudrate, # baudrate
bytesize=8, # number of databits
parity=serial.PARITY_EVEN,
stopbits=1,
xonxoff=0, # don't enable software flow control
rtscts=0, # don't enable RTS/CTS flow control
timeout=5 # set a timeout value, None for waiting forever
)
        self.sp.flushInput()
def _wait_for_ask(self, info = ""):
# wait for ask
try:
ask = ord(self.sp.read())
except:
raise CmdException("Can't read port or timeout")
else:
if ask == 0x79:
# ACK
return 1
else:
if ask == 0x1F:
# NACK
raise CmdException("NACK "+info)
else:
# Unknown responce
raise CmdException("Unknown response. "+info+": "+hex(ask))
    def reset(self):
        gpio.output(RST_PIN, gpio.LOW)
        time.sleep(.1)
        gpio.output(RST_PIN, gpio.HIGH)
        time.sleep(.5)
    def initChip(self):
        # Set boot
        print("Init!")
        gpio.output(BSEL_PIN, gpio.HIGH)
        self.reset()
        self.sp.write("\x7F")  # Syncro
        return self._wait_for_ask("Syncro")
    def releaseChip(self):
        gpio.output(BSEL_PIN, gpio.LOW)
        self.reset()
        gpio.cleanup()
def cmdGeneric(self, cmd):
self.sp.write(chr(cmd))
self.sp.write(chr(cmd ^ 0xFF)) # Control byte
return self._wait_for_ask(hex(cmd))
def cmdGet(self):
if self.cmdGeneric(0x00):
mdebug(10, "*** Get command");
len = ord(self.sp.read())
version = ord(self.sp.read())
mdebug(10, " Bootloader version: "+hex(version))
dat = map(lambda c: hex(ord(c)), self.sp.read(len))
if '0x44' in dat:
self.extended_erase = 1
mdebug(10, " Available commands: "+", ".join(dat))
self._wait_for_ask("0x00 end")
return version
else:
raise CmdException("Get (0x00) failed")
def cmdGetVersion(self):
if self.cmdGeneric(0x01):
mdebug(10, "*** GetVersion command")
version = ord(self.sp.read())
self.sp.read(2)
self._wait_for_ask("0x01 end")
mdebug(10, " Bootloader version: "+hex(version))
return version
else:
raise CmdException("GetVersion (0x01) failed")
def cmdGetID(self):
if self.cmdGeneric(0x02):
mdebug(10, "*** GetID command")
len = ord(self.sp.read())
id = self.sp.read(len+1)
self._wait_for_ask("0x02 end")
return reduce(lambda x, y: x*0x100+y, map(ord, id))
else:
raise CmdException("GetID (0x02) failed")
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
crc = byte0 ^ byte1 ^ byte2 ^ byte3
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3) + chr(crc))
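    # Illustrative note (not part of the original code): for the common STM32
    # flash base address 0x08000000 the five bytes produced above are
    # 0x08 0x00 0x00 0x00 plus the XOR checksum 0x08, i.e.
    # _encode_addr(0x08000000) == '\x08\x00\x00\x00\x08'.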
def cmdReadMemory(self, addr, lng):
assert(lng <= 256)
if self.cmdGeneric(0x11):
mdebug(10, "*** ReadMemory command")
self.sp.write(self._encode_addr(addr))
            mdebug(10, "// addr '%s', %d bytes" % (self._encode_addr(addr), len(self._encode_addr(addr))))
self._wait_for_ask("0x11 address failed")
N = (lng - 1) & 0xFF
crc = N ^ 0xFF
self.sp.write(chr(N) + chr(crc))
self._wait_for_ask("0x11 length failed")
return map(lambda c: ord(c), self.sp.read(lng))
else:
raise CmdException("ReadMemory (0x11) failed")
def cmdGo(self, addr):
if self.cmdGeneric(0x21):
mdebug(10, "*** Go command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x21 go failed")
else:
raise CmdException("Go (0x21) failed")
def cmdWriteMemory(self, addr, data):
assert(len(data) <= 256)
if self.cmdGeneric(0x31):
mdebug(10, "*** Write memory command")
            self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x31 address failed")
mdebug(10, " -- addr written" )
#map(lambda c: hex(ord(c)), data)
lng = (len(data)-1) & 0xFF
mdebug(10, " %s bytes to write" % [lng+1]);
self.sp.write(chr(lng)) # len really
crc = 0xFF
for c in data:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x31 programming failed")
mdebug(10, " Write memory done")
else:
raise CmdException("Write memory (0x31) failed")
def cmdEraseMemory(self, sectors = None):
        if self.extended_erase:
            return self.cmdExtendedEraseMemory()
if self.cmdGeneric(0x43):
mdebug(10, "*** Erase memory command")
if sectors is None:
# Global erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0x00))
else:
# Sectors erase
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x43 erasing failed")
mdebug(10, " Erase memory done")
else:
raise CmdException("Erase memory (0x43) failed")
def cmdExtendedEraseMemory(self):
if self.cmdGeneric(0x44):
mdebug(10, "*** Extended Erase memory command")
# Global mass erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0xFF))
# Checksum
self.sp.write(chr(0x00))
tmp = self.sp.timeout
self.sp.timeout = 30
print("Extended erase (0x44), this can take ten seconds or more")
self._wait_for_ask("0x44 erasing failed")
self.sp.timeout = tmp
mdebug(10, " Extended Erase memory done")
else:
raise CmdException("Extended Erase memory (0x44) failed")
def cmdWriteProtect(self, sectors):
if self.cmdGeneric(0x63):
mdebug(10, "*** Write protect command")
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x63 write protect failed")
mdebug(10, " Write protect done")
else:
raise CmdException("Write Protect memory (0x63) failed")
def cmdWriteUnprotect(self):
if self.cmdGeneric(0x73):
mdebug(10, "*** Write Unprotect command")
self._wait_for_ask("0x73 write unprotect failed")
self._wait_for_ask("0x73 write unprotect 2 failed")
mdebug(10, " Write Unprotect done")
else:
raise CmdException("Write Unprotect (0x73) failed")
def cmdReadoutProtect(self):
if self.cmdGeneric(0x82):
mdebug(10, "*** Readout protect command")
self._wait_for_ask("0x82 readout protect failed")
self._wait_for_ask("0x82 readout protect 2 failed")
mdebug(10, " Read protect done")
else:
raise CmdException("Readout protect (0x82) failed")
def cmdReadoutUnprotect(self):
if self.cmdGeneric(0x92):
mdebug(10, "*** Readout Unprotect command")
self._wait_for_ask("0x92 readout unprotect failed")
self._wait_for_ask("0x92 readout unprotect 2 failed")
mdebug(10, " Read Unprotect done")
else:
raise CmdException("Readout unprotect (0x92) failed")
# Complex commands section
def readMemory(self, addr, lng):
data = []
if usepbar:
widgets = ['Reading: ', Percentage(),', ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets,maxval=lng, term_width=79).start()
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, 256)
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, lng)
return data
def writeMemory(self, addr, data):
lng = len(data)
if usepbar:
widgets = ['Writing: ', Percentage(),' ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=lng, term_width=79).start()
offs = 0
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+256])
offs = offs + 256
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+lng] + ([0xFF] * (256-lng)) )
#def __init__(self):
#pass
def usage():
print("""Usage: %s [-hqVewvr] [-l length] [-p port] [-b baud] [-a addr] [-g addr] [file.bin]
-h This help
-q Quiet
-V Verbose
-e Erase
-w Write
-v Verify
-r Read
-l length Length of read
-p port Serial port (default: /dev/ttyAMA0)
-b baud Baud speed (default: 115200)
-a addr Target address
-g addr Address to start running at (0x08000000, usually)
./stm32loader.py -e -w -v example/main.bin
""") % sys.argv[0]
if __name__ == "__main__":
# Import Psyco if available
try:
import psyco
psyco.full()
print("Using Psyco...")
except ImportError:
pass
conf = {
'port': '/dev/ttyAMA0',
'baud': 115200,
'address': 0x08000000,
'erase': 0,
'write': 0,
'verify': 0,
'read': 0,
'go_addr':-1,
}
# http://www.python.org/doc/2.5.2/lib/module-getopt.html
try:
opts, args = getopt.getopt(sys.argv[1:], "hqVewvrp:b:a:l:g:")
    except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
QUIET = 5
for o, a in opts:
if o == '-V':
QUIET = 10
elif o == '-q':
QUIET = 0
elif o == '-h':
usage()
sys.exit(0)
elif o == '-e':
conf['erase'] = 1
elif o == '-w':
conf['write'] = 1
elif o == '-v':
conf['verify'] = 1
elif o == '-r':
conf['read'] = 1
elif o == '-p':
conf['port'] = a
elif o == '-b':
conf['baud'] = eval(a)
elif o == '-a':
conf['address'] = eval(a)
elif o == '-g':
conf['go_addr'] = eval(a)
elif o == '-l':
conf['len'] = eval(a)
else:
assert False, "unhandled option"
# gpio initialize
    gpio.setmode(gpio.BOARD)
    gpio.setup([RST_PIN, BSEL_PIN], gpio.OUT)
cmd = CommandInterface()
cmd.open(conf['port'], conf['baud'])
mdebug(10, "Open port %(port)s, baud %(baud)d" % {'port':conf['port'], 'baud':conf['baud']})
try:
try:
cmd.initChip()
except Exception as e:
print("Can't init. Ensure that BOOT0 is enabled and reset device")
print(e)
            exit(1)
bootversion = cmd.cmdGet()
mdebug(0, "Bootloader version %X" % bootversion)
id = cmd.cmdGetID()
mdebug(0, "Chip id: 0x%x (%s)" % (id, chip_ids.get(id, "Unknown")))
# cmd.cmdGetVersion()
# cmd.cmdGetID()
# cmd.cmdReadoutUnprotect()
# cmd.cmdWriteUnprotect()
# cmd.cmdWriteProtect([0, 1])
if (conf['write'] or conf['verify']):
        # bytearray() yields integer byte values on both Python 2 and 3.
        data = list(bytearray(open(args[0], 'rb').read()))
if conf['erase']:
cmd.cmdEraseMemory()
if conf['write']:
cmd.writeMemory(conf['address'], data)
if conf['verify']:
verify = cmd.readMemory(conf['address'], len(data))
if(data == verify):
print("Verification OK")
else:
print("Verification FAILED")
print(str(len(data)) + ' vs ' + str(len(verify)))
            for i in range(0, len(data)):
if data[i] != verify[i]:
print(hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]))
if not conf['write'] and conf['read']:
rdata = cmd.readMemory(conf['address'], conf['len'])
        open(args[0], 'wb').write(bytearray(rdata))
if conf['go_addr'] != -1:
cmd.cmdGo(conf['go_addr'])
finally:
cmd.releaseChip()
| 32.244898
| 113
| 0.52038
|
c0d91d00210e877f747e282991a9488de85994af
| 4,936
|
py
|
Python
|
backend/sqlite.py
|
relkochta/koreader-sync
|
49b02812fe555675dc7848dbbd289a282cac5efd
|
[
"MIT"
] | null | null | null |
backend/sqlite.py
|
relkochta/koreader-sync
|
49b02812fe555675dc7848dbbd289a282cac5efd
|
[
"MIT"
] | null | null | null |
backend/sqlite.py
|
relkochta/koreader-sync
|
49b02812fe555675dc7848dbbd289a282cac5efd
|
[
"MIT"
] | null | null | null |
from backend.common import Document
import sqlite3
# SQLite3 backend, stores data in a local .db file
class BackendSQLite:
# The database location
database: str
# Errors will be propagated to the object's creator
def __init__(self, database: str):
# Initialize the database and get a cursor
connection = sqlite3.connect(database)
cursor = connection.cursor()
# Create the users table if it doesn't exist
cursor.execute('''CREATE TABLE IF NOT EXISTS users
(username text, userkey text)''')
# Create the documents table if it doesn't exist
cursor.execute('''CREATE TABLE IF NOT EXISTS documents
(username text, document text, progress text,
percentage float, device text, device_id text,
timestamp int)''')
# Commit and free the cursor and connection
cursor.close()
connection.commit()
connection.close()
# Save the database file location
self.database = database
# Adds a username/userkey combination.
# Returns False if the user already exists.
def create_user(self, username: str, userkey: str):
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
cursor.execute("SELECT * FROM users WHERE username = ?", (username,))
if cursor.fetchone():
# Attempted to add a user that already exists
return False
# Let's add the user:
cursor.execute("INSERT INTO users VALUES (?, ?)", (username, userkey))
# Cleanup
cursor.close()
connection.commit()
connection.close()
return True
# Updates a document, creating if it does not exist.
def update_document(self, username: str, document: Document):
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
# If the document doesn't exist, let's create it.
cursor.execute("SELECT * FROM documents WHERE username = ? AND document = ?", (username, document.document))
if not cursor.fetchone():
cursor.execute("INSERT INTO documents VALUES (?, ?, ?, ?, ?, ?, ?)",
(username, document.document, document.progress, document.percentage,
document.device, document.device_id, document.timestamp))
# If the document _does_ exist, update it.
cursor.execute('''UPDATE documents
SET progress = ?, percentage = ?, device = ?, device_id = ?, timestamp = ?
WHERE username = ? AND document = ?''',
(document.progress, document.percentage, document.device, document.device_id,
document.timestamp, username, document.document))
# Cleanup
cursor.close()
connection.commit()
connection.close()
# Checks if a login is valid.
# Returns True if it is, False if not.
def check_login(self, username: str, userkey: str) -> bool:
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
# Check if the username/userkey combo exists.
cursor.execute("SELECT * FROM users WHERE username = ? AND userkey = ?", (username, userkey))
exists = bool(cursor.fetchone())
# Cleanup
cursor.close()
connection.commit()
connection.close()
# Return our result
return exists
# Gets the details of a document present in the database.
# Returns None if it doesn't exist.
def get_document(self, username: str, document: str) -> Document:
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
# Get the relevant row in the table
cursor.execute("SELECT * FROM documents WHERE username = ? AND document = ?", (username, document))
row = cursor.fetchone()
        if not row:
            # Document isn't present in the database; close the handles first.
            cursor.close()
            connection.close()
            return None
# Create a document instance from it
resultdoc = Document(row[1], row[2], row[3], row[4], row[5], row[6])
# Cleanup
cursor.close()
connection.commit()
connection.close()
return resultdoc
# Prints the database to the console.
# Intended for debugging.
def dbg_print_database(self):
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
# Print the users table
print("------ Table 'users' ------")
for row in cursor.execute("SELECT * FROM users"):
print(row)
# Print the documents table
print("\n------ Table 'documents' ------")
for row in cursor.execute("SELECT * FROM documents"):
print(row)
# Cleanup
cursor.close()
connection.close()
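# --- Illustrative usage sketch (not part of the original module) ---
# Exercises the backend against a throwaway database file. The file path, user
# name and document values are made up; the Document constructor is assumed to
# take (document, progress, percentage, device, device_id, timestamp), matching
# the order used by get_document() above.
if __name__ == "__main__":
    backend = BackendSQLite("/tmp/koreader-sync-demo.db")
    backend.create_user("alice", "secret-key")
    assert backend.check_login("alice", "secret-key")
    demo_doc = Document("deadbeef", "42", 0.5, "demo-device", "demo-device-id", 1700000000)
    backend.update_document("alice", demo_doc)
    print(backend.get_document("alice", "deadbeef"))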
| 35.768116
| 116
| 0.595827
|
41fb73948c68b263afbd7b08a02ef0ce88b7d126
| 1,301
|
py
|
Python
|
Models/DCGAN/Discriminator.py
|
z1xuanchen/datares_GANs
|
f9a03950bc4ba378e9eebb7cfd07377afd72b53c
|
[
"MIT"
] | 6
|
2021-04-03T03:21:49.000Z
|
2022-03-24T07:15:56.000Z
|
Models/DCGAN/Discriminator.py
|
z1xuanchen/datares_GANs
|
f9a03950bc4ba378e9eebb7cfd07377afd72b53c
|
[
"MIT"
] | 1
|
2021-04-27T15:23:35.000Z
|
2021-04-27T15:27:31.000Z
|
Models/DCGAN/Discriminator.py
|
z1xuanchen/datares_GANs
|
f9a03950bc4ba378e9eebb7cfd07377afd72b53c
|
[
"MIT"
] | 6
|
2021-02-15T09:46:48.000Z
|
2021-11-29T17:14:56.000Z
|
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, channels_img, features_d):
super(Discriminator, self).__init__()
self.disc = nn.Sequential(
# input: N x channels_img x 64 x 64
nn.Conv2d(
channels_img, features_d, kernel_size=4, stride=2, padding=1
),
nn.LeakyReLU(0.2),
# _block(in_channels, out_channels, kernel_size, stride, padding)
self._block(features_d, features_d * 2, 4, 2, 1),
self._block(features_d * 2, features_d * 4, 4, 2, 1),
self._block(features_d * 4, features_d * 8, 4, 2, 1),
# After all _block img output is 4x4 (Conv2d below makes into 1x1)
nn.Conv2d(features_d * 8, 1, kernel_size=4, stride=2, padding=0),
nn.Sigmoid(),
)
def _block(self, in_channels, out_channels, kernel_size, stride, padding):
return nn.Sequential(
nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
bias=False,
),
# nn.BatchNorm2d(out_channels),
nn.LeakyReLU(0.2),
)
def forward(self, x):
return self.disc(x)
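# --- Illustrative usage sketch (not part of the original module) ---
# A quick shape check on a random batch of fake 64x64 RGB images; the batch
# size and feature width below are arbitrary.
if __name__ == "__main__":
    import torch

    disc = Discriminator(channels_img=3, features_d=8)
    fake_batch = torch.randn(4, 3, 64, 64)
    scores = disc(fake_batch)
    print(scores.shape)  # expected: torch.Size([4, 1, 1, 1]), one sigmoid score per image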
| 32.525
| 78
| 0.537279
|
933f4b73ba67403baa1aca5a194b7778271669ad
| 1,730
|
py
|
Python
|
elementary.py
|
chapman-phys220-2018f/cw05-frank-raha
|
ca52420442fb24a107345993580c1c9b0f25efda
|
[
"MIT"
] | null | null | null |
elementary.py
|
chapman-phys220-2018f/cw05-frank-raha
|
ca52420442fb24a107345993580c1c9b0f25efda
|
[
"MIT"
] | null | null | null |
elementary.py
|
chapman-phys220-2018f/cw05-frank-raha
|
ca52420442fb24a107345993580c1c9b0f25efda
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####
# Raha and Frank
# Email: pirzadeh@chapman.edu
# Elementary module for CW 5
# PHYS 220
# 10/1/18
####
import scipy.constants
class Particle(object):
"""Particle is a class that should have 3 initialized variables: Mass(a float), Position(A triplet of floats), Momentum(A triplet of floats)"""
mass = 0.0
position = (0.0,0.0,0.0)
momentum = (0.0,0.0,0.0)
def __init__(self, x, y, z):
"""Inits the class, arg1: x position float, arg2: y position float, arg3: z positionfloat"""
self.position = (x, y, z)
self.mass = 1.0
self.momentum = (0.0,0.0,0.0)
def impulse(self, px,py,pz):
"""Alters the momentum by the impulse amount. Needs floating point triple."""
self.momentum = (self.momentum[0]+px,self.momentum[1]+py,self.momentum[2]+pz)
def move(self, dt):
self.position = (self.position[0] + (dt/self.mass)*self.momentum[0],self.position[1]+(dt/self.mass)*self.momentum[1],self.position[2]+(dt/self.mass)*self.momentum[2])
class ChargedParticle(Particle):
charge = 0.0
def __init__(self, x, y, z):
"""uses the super constructor to construct the class instance"""
        # super(Particle, self) would resolve past Particle to object, which
        # rejects the extra arguments; delegate to Particle via the class itself.
        super(ChargedParticle, self).__init__(x, y, z)
self.charge = 0.0
class Electron(ChargedParticle):
def __init__(self, x, y, z):
self.charge = -scipy.constants.e
super(ChargedParticle,self).__init__(x, y, z)
self.mass = scipy.constants.m_e
class Proton(ChargedParticle):
def __init__(self, x, y, z):
self.charge = scipy.constants.e
super(ChargedParticle,self).__init__(x, y, z)
self.mass = scipy.constants.m_p
def main(argv):
pass
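# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the classes above; the impulse values and time step are
# arbitrary numbers chosen only for the example.
if __name__ == "__main__":
    e = Electron(0.0, 0.0, 0.0)
    e.impulse(1.0e-24, 0.0, 0.0)   # give the electron some x-momentum
    e.move(1.0e-6)                 # advance the state by one microsecond
    print(e.charge, e.mass, e.position)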
| 30.350877
| 174
| 0.631792
|
7b3adef69524f059195e5f7f20c58d62c9bff670
| 6,461
|
py
|
Python
|
CreateJiraIssue.py
|
theluckyteam/sublime-jira
|
167b32be9b6a358795508247e80cdc6132542276
|
[
"BSD-3-Clause"
] | null | null | null |
CreateJiraIssue.py
|
theluckyteam/sublime-jira
|
167b32be9b6a358795508247e80cdc6132542276
|
[
"BSD-3-Clause"
] | null | null | null |
CreateJiraIssue.py
|
theluckyteam/sublime-jira
|
167b32be9b6a358795508247e80cdc6132542276
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
import sublime, sublime_plugin
import re
import types
import json
import urllib.request
from urllib.error import URLError
def parse_issue_stream(stream):
    # Split the expression into lines and work out how to process it
lines = []
parts = stream.split("\n")
for part in parts:
if part.strip() != '':
lines.append(part)
    # Only an expression containing at least one line makes sense
if len(lines) < 1:
return None
    # Prepare the issue description from the expression
description = "\n".join(lines[1:]) if len(lines) > 1 else ''
    # From here on only the first line of the expression matters
stream = lines[0]
    # Prepare the estimated time for the issue
temp_result = re.search(r'\~(.+)', stream)
estimate = temp_result.group(1).strip() if temp_result is not None else ''
stream = re.sub(r'\~(.+)', '', stream).strip()
    # Prepare the labels
labels = re.findall(r'\#(\S+)', stream)
stream = re.sub(r'\#(\S+)', '', stream).strip()
    # Prepare the summary
summary = stream
    # Prepare the data structure for creating the issue
issue = {
"summary": summary,
"description": description,
"estimate": estimate,
"labels": labels
}
return issue
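# --- Illustrative example (not part of the original plugin) ---
# For the expression below, parse_issue_stream returns roughly:
#   parse_issue_stream("Fix login bug #backend #auth ~2h\nSteps to reproduce...")
#   -> {"summary": "Fix login bug", "description": "Steps to reproduce...",
#       "estimate": "2h", "labels": ["backend", "auth"]}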
class TokenGettingError(Exception):
""" Ошибка получения токена """
pass
class TokenFailedError(Exception):
""" Неверный токен авторизации """
pass
class IssueCreatingError(Exception):
""" Ошибка создания задачи """
pass
class CreateJiraIssueCommand(sublime_plugin.TextCommand):
def settings(self):
return self.view.settings()
def run(self, edit, project, issuetype):
settings = self.settings()
issue_project = project
issue_type = issuetype
issue_creation_url = settings.get('jira_issue_creation_url')
issue_assignee = settings.get('jira_issue_assignee')
issue_component = settings.get('jira_issue_component')
try:
            # Iterate over all selected regions in the current view
for region in self.view.sel():
issue_expression = self.view.substr(region)
issue_attributes = parse_issue_stream(issue_expression)
issue_definition = {
"fields": {
"summary": issue_attributes['summary'],
"description": issue_attributes['description'],
"issuetype": {
"id": issue_type
},
"project": {
"id": issue_project,
},
"labels": issue_attributes['labels'],
"timetracking": {
"remainingEstimate": issue_attributes['estimate']
},
"assignee": {
"name": issue_assignee,
},
"components": [
{
"id": issue_component
}
]
}
}
issue = self.create_issue(issue_creation_url, issue_definition)
issue_result = issue['key'] + ' - ' + issue_expression
self.view.replace(edit, region, issue_result)
except:
            sublime.message_dialog('An error occurred while creating the issue. Check the arguments and try again.')
def clear_access_token(self):
settings = self.settings()
settings.erase('jira_access_token')
def access_token(self):
settings = self.settings()
if settings.has('jira_access_token'):
access_token = settings.get('jira_access_token')
else:
url = settings.get('jira_auth_url')
username = settings.get('jira_auth_username')
password = settings.get('jira_auth_password')
data = {
"username": username,
"password": password,
}
access_token = self.request_access_token(url, data)
settings.set('jira_access_token', access_token)
return access_token
def request_access_token(self, url, data):
json_data = json.dumps(data, ensure_ascii=False)
json_bytes = json_data.encode('utf-8')
json_bytes_length = len(json_bytes)
request = urllib.request.Request(url)
request.add_header('Content-Type', 'application/json; charset=utf-8')
request.add_header('Content-Length', json_bytes_length)
sublime.status_message('Request to "%s"' % url)
try:
response = urllib.request.urlopen(request, json_bytes)
response_data = response.read().decode('utf-8')
result = json.loads(response_data)
cookie_auth = result['session']
except:
raise TokenGettingError('Could not get access token.')
return cookie_auth
def create_issue(self, url, definition):
attempt = 3
while attempt > 0:
try:
access_token = self.access_token()
attempt -= 1
return self.request_create_issue(url, definition, access_token)
except TokenFailedError:
self.clear_access_token()
def request_create_issue(self, url, definition, token):
json_data = json.dumps(definition, ensure_ascii=False)
json_bytes = json_data.encode('utf-8')
json_bytes_length = len(json_bytes)
request = urllib.request.Request(url)
request.add_header('Cookie', '='.join([token['name'], token['value']]))
request.add_header('Content-Type', 'application/json; charset=utf-8')
request.add_header('Content-Length', json_bytes_length)
sublime.status_message('Request to "%s"' % url)
try:
response = urllib.request.urlopen(request, json_bytes)
response_data = response.read().decode('utf-8')
return json.loads(response_data)
except URLError as error:
if error.code == 401:
raise TokenFailedError('Token is failed.')
else:
raise IssueCreatingError('Could not create issue.')
except:
raise IssueCreatingError('Could not create issue.')
| 32.305
| 115
| 0.572822
|
4f88fcf8afb833ccd478636b1a8686047f70c90c
| 4,513
|
py
|
Python
|
yepes/utils/phased.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
yepes/utils/phased.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
yepes/utils/phased.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import base64
import re
from django.contrib.messages.storage.base import BaseStorage
from django.http import HttpRequest
from django.template.base import (
COMMENT_TAG_START, COMMENT_TAG_END,
Template,
TemplateSyntaxError,
)
from django.template.context import BaseContext, RequestContext, Context
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import Promise, LazyObject
from django.utils.six.moves import cPickle as pickle
from yepes.conf import settings
SECRET_DELIMITER = '{0} {1} {2}'.format(
COMMENT_TAG_START,
settings.PHASED_SECRET_DELIMITER,
COMMENT_TAG_END)
PICKLED_CONTEXT_RE = re.compile(r'.*{0} context (.*) {1}.*'.format(
COMMENT_TAG_START,
COMMENT_TAG_END))
FORBIDDEN_CLASSES = (Promise, LazyObject, HttpRequest, BaseStorage)
def backup_csrf_token(context, storage):
"""
Get the CSRF token and convert it to a string (since it's lazy).
"""
token = context.get('csrf_token', 'NOTPROVIDED')
storage['csrf_token'] = force_bytes(token)
def flatten_context(context, remove_lazy=True):
"""
Creates a dictionary from a ``Context`` instance by traversing its dicts
list. Can remove unwanted subjects from the result, e.g. lazy objects.
"""
flat_context = {}
def _flatten(context):
if isinstance(context, dict):
for k, v in six.iteritems(context):
                if isinstance(v, BaseContext):
                    _flatten(v)
else:
flat_context[k] = v
elif isinstance(context, BaseContext):
for context_dict in context.dicts:
_flatten(context_dict)
# traverse the passed context and update the dictionary accordingly
_flatten(context)
if remove_lazy:
only_allowed = lambda dic: not isinstance(dic[1], FORBIDDEN_CLASSES)
return dict(filter(only_allowed, six.iteritems(flat_context)))
else:
return flat_context
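# --- Illustrative example (not part of the original module) ---
# flatten_context collapses the dict stack of a Context into one plain dict,
# e.g. (assuming Django settings are configured):
#   ctx = Context({"a": 1}); ctx.update({"b": 2})
#   flatten_context(ctx) -> {'True': True, 'False': False, 'None': None, 'a': 1, 'b': 2}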
def pickle_context(context, template=None):
"""
Pickle the given ``Context`` instance and do a few optimizations before.
"""
context = flatten_context(context)
context.pop('False', None)
context.pop('None', None)
context.pop('True', None)
pickled_context = base64.standard_b64encode(
pickle.dumps(context, protocol=pickle.HIGHEST_PROTOCOL))
if template is not None:
return template.format(context=pickled_context)
else:
return '{0} context {1} {2}'.format(
COMMENT_TAG_START,
pickled_context,
COMMENT_TAG_END)
def restore_csrf_token(request, storage):
"""
Given the request and a the context used during the second render phase,
this wil check if there is a CSRF cookie and restores if needed, to
counteract the way the CSRF framework invalidates the CSRF token after
each request/response cycle.
"""
try:
request.META['CSRF_COOKIE'] = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
csrf_token = storage.pop('csrf_token', None)
if csrf_token:
request.META['CSRF_COOKIE'] = csrf_token
def second_pass_render(request, content):
"""
Split on the secret delimiter and render the phased blocks.
"""
content = force_text(content)
result = []
for index, bit in enumerate(content.split(SECRET_DELIMITER)):
if index % 2:
template = Template(bit)
else:
result.append(bit)
continue
context = unpickle_context(bit)
restore_csrf_token(request, context)
request_context = RequestContext(request, context)
try:
rendered = template.render(request_context)
except TemplateSyntaxError:
# For example, in debug pages.
return content
if SECRET_DELIMITER in rendered:
rendered = second_pass_render(request, rendered)
result.append(rendered)
return force_bytes(''.join(result))
def unpickle_context(content, pattern=None):
"""
Unpickle the context from the given content string or return ``None``.
"""
if pattern is None:
pattern = PICKLED_CONTEXT_RE
match = pattern.search(content)
if match is not None:
return pickle.loads(base64.standard_b64decode(match.group(1)))
else:
return {}
| 30.086667
| 80
| 0.666297
|
acb8fe08b9f8f77c4ecb3e04e4355066b3fc66b8
| 1,831
|
py
|
Python
|
__pandas/06.py
|
zlikun-lang/python-data-analysis
|
66eaa04952e422fa881281dc2244f2984a15e9bf
|
[
"Apache-2.0"
] | 1
|
2020-05-29T07:43:10.000Z
|
2020-05-29T07:43:10.000Z
|
__pandas/06.py
|
zlikun-lang/python-data-analysis
|
66eaa04952e422fa881281dc2244f2984a15e9bf
|
[
"Apache-2.0"
] | null | null | null |
__pandas/06.py
|
zlikun-lang/python-data-analysis
|
66eaa04952e422fa881281dc2244f2984a15e9bf
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from pandas import Series, DataFrame
# Build a Series from a list, specifying its index; the two must have the same length
obj = Series([4.5, 7.2, -5.3, 3.6], index=['a', 'b', 'c', 'd'])
# a 4.5
# b 7.2
# c -5.3
# d 3.6
# dtype: float64
print(obj)
# Reindex; missing entries are filled with NaN
obj2 = obj.reindex(['a', 'b', 'c', 'm', 'n'])
# a 4.5
# b 7.2
# c -5.3
# m NaN
# n NaN
# dtype: float64
print(obj2)
# Missing entries can instead be filled with a specified value
obj2 = obj.reindex(['a', 'b', 'c', 'm', 'n'], fill_value=0)
# a 4.5
# b 7.2
# c -5.3
# m 0.0
# n 0.0
# dtype: float64
print(obj2)
obj = Series(['red', 'green', 'blue'], index=[0, 2, 4])
# 0 red
# 2 green
# 4 blue
# dtype: object
print(obj)
# Use method for filling: ffill/pad fills forward, bfill/backfill fills backward
obj2 = obj.reindex(range(6), method='ffill')
# 0 red
# 1 red
# 2 green
# 3 green
# 4 blue
# 5 blue
# dtype: object
print(obj2)
obj2 = obj.reindex(range(6), method='bfill')
# 0 red
# 1 green
# 2 green
# 3 blue
# 4 blue
# 5 NaN
# dtype: object
print(obj2)
# Build a DataFrame from an array, specifying index and columns
frame = DataFrame(np.arange(9).reshape(3, 3), index=['a', 'c', 'd'], columns=['A', 'B', 'C'])
# A B C
# a 0 1 2
# c 3 4 5
# d 6 7 8
print(frame)
# Reindex the rows
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
# A B C
# a 0.0 1.0 2.0
# b NaN NaN NaN
# c 3.0 4.0 5.0
# d 6.0 7.0 8.0
print(frame2)
# Columns can be reindexed as well, via the columns argument
frame2 = frame.reindex(index=['a', 'b', 'c', 'd'], columns=['A', 'B', 'C', 'D'])
# A B C D
# a 0.0 1.0 2.0 NaN
# b NaN NaN NaN NaN
# c 3.0 4.0 5.0 NaN
# d 6.0 7.0 8.0 NaN
print(frame2)
# DataFrame reindexing can also fill values, but only along rows, not columns
frame2 = frame.reindex(index=['a', 'b', 'c', 'd'], columns=['A', 'B', 'C', 'D'], method='bfill')
# A B C D
# a 0 1 2 NaN
# b 3 4 5 NaN
# c 3 4 5 NaN
# d 6 7 8 NaN
print(frame2)
| 19.072917
| 96
| 0.523211
|
7545239b1cb62f89d220091b1ee79e100d00c237
| 15,175
|
py
|
Python
|
src/m4_accumulating_sequences.py
|
craannj/12-MoreSequences
|
df1ca36a0c5d7e66f6bcffcddb86b2ad45dc8cd9
|
[
"MIT"
] | null | null | null |
src/m4_accumulating_sequences.py
|
craannj/12-MoreSequences
|
df1ca36a0c5d7e66f6bcffcddb86b2ad45dc8cd9
|
[
"MIT"
] | null | null | null |
src/m4_accumulating_sequences.py
|
craannj/12-MoreSequences
|
df1ca36a0c5d7e66f6bcffcddb86b2ad45dc8cd9
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice BUILDING-UP a new SEQUENCE,
one item at a time, using the ACCUMULATOR pattern.
-- We will later see a more efficient way to build-up and/or modify
sequences, namely by MUTATING their elements.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Nathaniel Craan.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the various TEST functions in this module. """
run_test_make_simple_list()
run_test_make_simple_string()
run_test_make_less_simple_string()
# -------------------------------------------------------------------------
# TODO: 8. Uncomment the tests below before working _TODO_ 9.
# They launch annoying rg.RoseWindows on each run that you don't want
# until you get to _TODO_ 9 and _TODO_ 10.
# -------------------------------------------------------------------------
# run_test_draw_shapes()
# run_test_rectangles_from_circles()
def run_test_make_simple_list():
""" Tests the make_simple_list function. """
# -------------------------------------------------------------------------
# DONE: 2. Implement this TEST function.
# It TESTS the make_simple_list function defined below.
# Include at least ** 2 ** tests.
#
# Use the same 4-step process as for previous TEST functions.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the make_simple_list function:')
print('--------------------------------------------------')
# Test 1:
expected = [5, 6, 7, 8, 9, 10, 11, 12, 13]
actual = make_simple_list(5, 13)
print('Expected:', expected)
print('Actual: ', actual)
# Test 2 (add your test here):
expected = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
actual = make_simple_list(-1, 15)
print('Expected:', expected)
print('Actual: ', actual)
def make_simple_list(m, n):
"""
What comes in:
-- a positive integer m
-- a positive integer n that is >= m
What goes out: Returns the list [m, m+1, m+2, ... n],
where m and n are the given arguments.
Side effects: None.
Examples:
If m is 5 and n is 13, then this function returns:
[5, 6, 7, 8, 9, 10, 11, 12, 13]
If m and n are both 205, then this function returns: [205]
Type hints:
:type m: int
:type n: int
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
nums = []
for k in range(m, n + 1):
nums = nums + [k]
return nums
def run_test_make_simple_string():
""" Tests the make_simple_string function. """
# -------------------------------------------------------------------------
# DONE: 4. Implement this TEST function.
# It TESTS the make_simple_string function defined below.
# Include at least ** 2 ** tests.
#
# Use the same 4-step process as for previous TEST functions.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the make_simple_string function:')
print('--------------------------------------------------')
# Test 1:
    expected = '5-6-7-8-9-10-11-12-13-'
actual = make_simple_string(5, 13)
print('Expected:', expected)
print('Actual: ', actual)
# Test 2:
    expected = '1-2-3-4-5-6-7-8-9-'
actual = make_simple_string(1, 9)
print('Expected:', expected)
print('Actual: ', actual)
def make_simple_string(m, n):
"""
What comes in:
-- a positive integer m
-- a positive integer n that is >= m
What goes out: Returns the STRING whose characters are
m, m+1, m+2, ... n,
each with a '-' character after it,
where m and n are the given arguments.
Side effects: None.
Examples:
If m is 5 and n is 13, then this function returns:
'5-6-7-8-9-10-11-12-13-'
If m and n are both 205, then this function returns: '205-'
Type hints:
:type m: int
:type n: int
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
    nums = ''
    for k in range(m, n + 1):
        nums = nums + str(k) + '-'
    return nums
def run_test_make_less_simple_string():
""" Tests the make_less_simple_string function. """
# -------------------------------------------------------------------------
# TODO: 6. Implement this TEST function.
# It TESTS the make_less_simple_string function defined below.
# Include at least ** 2 ** tests.
#
# Use the same 4-step process as for previous TEST functions.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the make_less_simple_string function:')
print('--------------------------------------------------')
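    # A possible sketch of the two tests described above (not the only
    # reasonable choice of test cases). It assumes make_less_simple_string
    # (below) returns the hyphen-separated string described in its
    # specification.
    # Test 1:
    expected = '5-6-7-8-9-10-11-12-13'
    actual = make_less_simple_string(5, 13)
    print('Expected:', expected)
    print('Actual:  ', actual)
    # Test 2:
    expected = '205'
    actual = make_less_simple_string(205, 205)
    print('Expected:', expected)
    print('Actual:  ', actual)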
def make_less_simple_string(m, n):
"""
What comes in:
-- a positive integer m
-- a positive integer n that is >= m
What goes out: The same as the previous problem,
but WITHOUT the hyphen after the LAST number.
That is, this function returns the STRING whose characters are
m, m+1, m+2, ... n,
with a '-' character BETWEEN each of the items
and where m and n are the given arguments.
Side effects: None.
Examples:
If m is 5 and n is 13, then this function returns:
'5-6-7-8-9-10-11-12-13'
If m and n are both 205, then this function returns: '205'
Type hints:
:type m: int
:type n: int
"""
# -------------------------------------------------------------------------
# TODO: 7. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
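    # A possible sketch (one of several reasonable approaches): start with
    # the first number, then add a '-' BEFORE each subsequent number, so
    # that no trailing hyphen is produced.
    result = str(m)
    for k in range(m + 1, n + 1):
        result = result + '-' + str(k)
    return result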
def run_test_draw_shapes():
""" Tests the draw_shapes function. """
print()
print('-----------------------------------------------------------')
print('Testing the draw_shapes function:')
print('-----------------------------------------------------------')
print('See the graphics window that pops up.')
print('It should show 3 circles: red, white and blue.')
print()
print('Then it should ask the user to click the mouse to continue.')
print('Then it should show 4 more shapes: a green circle,')
print(' a yellow rectangle, a red circle and a thick black line.')
# -------------------------------------------------------------------------
# Test 1 is ALREADY DONE (here).
# -------------------------------------------------------------------------
window = rg.RoseWindow(500, 330, 'draw_shapes, two tests')
circles = [rg.Circle(rg.Point(50, 50), 50),
rg.Circle(rg.Point(120, 50), 20),
rg.Circle(rg.Point(250, 170), 130)]
circles[0].fill_color = 'red'
circles[1].fill_color = 'white'
circles[2].fill_color = 'blue'
draw_shapes(circles, window)
window.continue_on_mouse_click()
# -------------------------------------------------------------------------
# Test 2 is ALREADY DONE (here).
# It runs in the same window as Test 1.
# The bottom circle should appear only PARTIALLY in the window;
# that is purposeful.
# -------------------------------------------------------------------------
rect_width = 100
rect_height = 160
rect_center = rg.Point(350, 100)
various = [rg.Circle(rg.Point(400, 50), 30),
rg.Rectangle(rg.Point(rect_center.x - rect_width / 2,
rect_center.y - rect_height / 2),
rg.Point(rect_center.x + rect_width / 2,
rect_center.y + rect_height / 2)),
rg.Circle(rg.Point(400, 300), 80),
rg.Line(rg.Point(0, 0), rg.Point(100, 330))]
various[0].fill_color = 'green'
various[1].fill_color = 'yellow'
various[2].fill_color = 'red'
various[3].thickness = 10
draw_shapes(various, window)
window.close_on_mouse_click()
def draw_shapes(shapes, window):
"""
What comes in:
-- a sequence of rg.Shape objects
Note: rg.Line, rg.Circle, rg.Point, ... are all rg.Shape
objects.
-- an rg.RoseWindow
What goes out: Nothing (i.e., None).
Side effects:
See draw_shapes.pdf in this project for pictures
that may help you better understand the following:
For each rg.Shape in the given sequence of rg.Shape objects,
1. Attaches the rg.Shape to the given rg.RoseWindow.
2. Renders the rg.RoseWindow with a 0.3 second delay
after the render.
Examples:
See the draw_shapes.pdf file in this project.
Type hints:
:type shapes: list | tuple of rg._Shape
:type window: rg.RoseWindow
"""
# -------------------------------------------------------------------------
# TODO: 9. Implement and test this function.
# *** Make sure you do _TODO_ 8 in main first! ***
# The testing code is already written for you; you enabled it via _TODO_ 8.
#
###########################################################################
# IMPORTANT: the same
# attach_to
# method works for ALL the rosegraphics shapes!
# FWIW: The word for ideas like this is "polymorphism".
###########################################################################
# -------------------------------------------------------------------------
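    # A possible sketch: the same  attach_to  call works for every
    # rosegraphics shape (polymorphism). It assumes that
    # window.render(0.3) draws the window and then pauses about
    # 0.3 seconds, per the specification above.
    for shape in shapes:
        shape.attach_to(window)
        window.render(0.3)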
def run_test_rectangles_from_circles():
""" Tests the rectangles_from_circles function. """
print()
print('-----------------------------------------------------------')
print('Testing the rectangles_from_circles function:')
print('-----------------------------------------------------------')
print('See the graphics window that pops up.')
print('It should show circles, then the circles circumscribed,')
print('then more circles, then the new circles circumscribed too.')
print()
print('See rectangles_from_circles.pdf in this project')
print('for pictures of the anticipated results.')
# -------------------------------------------------------------------------
# Test 1 is ALREADY DONE (here).
# -------------------------------------------------------------------------
window = rg.RoseWindow(650, 350,
'rectangles_from_circles, two tests')
circles = [rg.Circle(rg.Point(50, 80), 40),
rg.Circle(rg.Point(150, 50), 30),
rg.Circle(rg.Point(300, 100), 50),
rg.Circle(rg.Point(220, 70), 60)]
circles[0].fill_color = 'red'
circles[1].fill_color = 'white'
circles[2].fill_color = 'blue'
circles[3].fill_color = 'green'
# -------------------------------------------------------------------------
# This test calls the draw_shapes function that YOU write,
# above. So if your draw_shapes breaks, so will this test.
# -------------------------------------------------------------------------
draw_shapes(circles, window)
message = 'The circles to be circumscribed are shown above.'
message = message + ' Click to continue.'
window.continue_on_mouse_click(message)
rectangles = rectangles_from_circles(circles)
if rectangles is None:
print()
print('Either you have not yet gotten')
print(' to the rectangles_from_circles problem (OK, no problem)')
print(' or you have forgotten to return a result from that function.')
window.close_on_mouse_click()
return
draw_shapes(rectangles, window)
message = 'Now you should see the circumscribing rectangles too.'
message = message + ' Click to continue.'
window.continue_on_mouse_click(message)
# -------------------------------------------------------------------------
# Test 2 is ALREADY DONE (here).
# It runs in the same window as Test 1.
# -------------------------------------------------------------------------
circles = []
center = rg.Point(50, 150)
radius = 35
for _ in range(10):
circle = rg.Circle(center, radius)
circle.fill_color = 'magenta'
circles = circles + [circle]
center.x = center.x + 2 * radius
center.y = center.y + 15
radius = radius - 3
draw_shapes(circles, window)
message = 'More circles to be circumscribed are shown above.'
message = message + ' Click to continue.'
window.continue_on_mouse_click(message)
rectangles = rectangles_from_circles(circles)
draw_shapes(rectangles, window)
message = 'Now you should see the circumscribing rectangles too.'
message = message + ' Click to exit.'
window.continue_on_mouse_click(message, close_it=True)
def rectangles_from_circles(circles):
"""
See rectangles_from_circles.pdf in this project for pictures
that may help you better understand the following specification:
What comes in:
-- a sequence of rg.Circle objects
What goes out:
Returns a list of rg.Rectangles, where each rg.Rectangle circumscribes
its corresponding rg.Circle in the given list of rg.Circles.
Side effects: None.
Examples: See rectangles_from_circles.pdf in this project.
Type hints:
:type circles: list | tuple of rg.Circle
:rtype: list of rg.Rectangles
"""
# -------------------------------------------------------------------------
# TODO: 10. Implement and test this function.
# The testing code is already written for you (above).
#
###########################################################################
# IMPORTANT: Examine the testing code above carefully. Be sure
# that you understand WHY the tests are adequate tests!
#
# IMPORTANT: The specification does NOT say to draw anything
# in this function, so DON'T draw anything in here!
###########################################################################
# -------------------------------------------------------------------------
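    # A possible sketch: circumscribe each circle with the rectangle whose
    # opposite corners are offset from the circle's center by the radius.
    # It assumes each rg.Circle exposes  center  and  radius  attributes.
    rectangles = []
    for circle in circles:
        corner1 = rg.Point(circle.center.x - circle.radius,
                           circle.center.y - circle.radius)
        corner2 = rg.Point(circle.center.x + circle.radius,
                           circle.center.y + circle.radius)
        rectangles = rectangles + [rg.Rectangle(corner1, corner2)]
    return rectangles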
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()