Dataset schema (⌀ marks a nullable column):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Rows (values pipe-separated in the column order above, with the file content inline between the metadata and the trailing line-length statistics):
75f1dd73285bc1c583f36fa5211e973c274857a4 | 8,602 | py | Python | savu/tomo_recon.py | malte-storm/Savu | 16291e8a22464c50c511af01fbc648860c1236e6 | ["Apache-2.0"] | 1 | 2021-04-18T09:30:54.000Z | 2021-04-18T09:30:54.000Z | savu/tomo_recon.py | malte-storm/Savu | 16291e8a22464c50c511af01fbc648860c1236e6 | ["Apache-2.0"] | null | null | null | savu/tomo_recon.py | malte-storm/Savu | 16291e8a22464c50c511af01fbc648860c1236e6 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: Runner for the Savu framework
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import tempfile # this import is required for pyFAI - DO NOT REMOVE!
import argparse
import traceback
import sys
import os
from mpi4py import MPI
from savu.version import __version__
from savu.core.basic_plugin_runner import BasicPluginRunner
from savu.core.plugin_runner import PluginRunner
def __option_parser():
""" Option parser for command line arguments.
"""
version = "%(prog)s " + __version__
parser = argparse.ArgumentParser(prog='savu')
hide = argparse.SUPPRESS
parser.add_argument('in_file', help='Input data file.')
process_str = 'Process list, created with the savu configurator.'
parser.add_argument('process_list', help=process_str)
parser.add_argument('out_folder', help='Output folder.')
parser.add_argument('--version', action='version', version=version)
parser.add_argument("-f", "--folder", help="Override output folder name")
tmp_help = "Store intermediate files in a temp directory."
parser.add_argument("-d", "--tmp", help=tmp_help)
template_help = "Pass a template file of plugin input parameters."
parser.add_argument("-t", "--template", help=template_help, default=None)
log_help = "Store full log file in a separate location"
parser.add_argument("-l", "--log", help=log_help)
v_help = "Display all debug log messages"
parser.add_argument("-v", "--verbose", help=v_help, action="store_true",
default=False)
parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
help="Display only Errors and Info.", default=False)
# temporary flag to fix lustre issue
parser.add_argument("--lustre_workaround", action="store_true",
dest="lustre", help="Avoid lustre segmentation fault",
default=False)
sys_params_help = "Override default path to Savu system parameters file."
parser.add_argument("--system_params", help=sys_params_help, default=None)
# Hidden arguments
# process names
parser.add_argument("-n", "--names", help=hide, default="CPU0")
# transport mechanism
parser.add_argument("--transport", help=hide, default="hdf5")
# Set Savu mode
parser.add_argument("-m", "--mode", help=hide, default="full",
choices=['basic', 'full'])
# Set logging to cluster mode
parser.add_argument("-c", "--cluster", action="store_true", help=hide,
default=False)
# Send an email on completion
parser.add_argument("-e", "--email", dest="email", help=hide, default=None)
# Facility email for errors
parser.add_argument("--facility_email", dest="femail", help=hide,
default=None)
# Set beamline log file (for online processing)
parser.add_argument("--bllog", dest="bllog", help=hide, default=None)
# Location of syslog server
parser.add_argument("-s", "--syslog", dest="syslog", help=hide,
default='localhost')
# Port to connect to syslog server on
parser.add_argument("-p", "--syslog_port", dest="syslog_port",
help=hide, default=514, type=int)
parser.add_argument("--test_state", dest="test_state", default='False',
action='store_true', help=hide)
# DosNa related parameters
parser.add_argument("--dosna_backend", dest="dosna_backend", help=hide,
default=None)
parser.add_argument("--dosna_engine", dest="dosna_engine", help=hide,
default=None)
parser.add_argument("--dosna_connection", dest="dosna_connection",
help=hide, default=None)
parser.add_argument("--dosna_connection_options",
dest="dosna_connection_options", help=hide,
nargs='+', default=[])
check_help = "Continue Savu processing from a checkpoint."
choices = ['plugin', 'subplugin']
parser.add_argument("--checkpoint", nargs="?", choices=choices,
const='plugin', help=check_help, default=None)
args = parser.parse_args()
__check_conditions(parser, args)
return args
def __check_conditions(parser, args):
if args.checkpoint and not args.folder:
msg = "--checkpoint flag requires '-f folder_name', where folder_name"\
" contains the partially completed Savu job. The out_folder"\
" should be the path to this folder."
parser.error(msg)
def _set_options(args):
""" Set run specific information in options dictionary.
    :param args: parsed command line arguments (required and optional)
    :returns options: dictionary of run options
:rtype: dict
"""
options = {}
options['data_file'] = args.in_file
options['process_file'] = args.process_list
options['mode'] = args.mode
options['template'] = args.template
options['transport'] = 'basic' if args.mode == 'basic' else args.transport
options['process_names'] = args.names
options['verbose'] = args.verbose
options['quiet'] = args.quiet
options['cluster'] = args.cluster
options['syslog_server'] = args.syslog
options['syslog_port'] = args.syslog_port
options['test_state'] = args.test_state
options['lustre'] = args.lustre
options['bllog'] = args.bllog
options['email'] = args.email
options['femail'] = args.femail
options['system_params'] = args.system_params
out_folder_name = \
args.folder if args.folder else __get_folder_name(options['data_file'])
out_folder_path = __create_output_folder(args.out_folder, out_folder_name)
options['out_folder'] = out_folder_name
options['out_path'] = out_folder_path
basename = os.path.basename(args.in_file)
options['datafile_name'] = os.path.splitext(basename)[0] if basename \
else args.in_file.split(os.sep)[-2]
inter_folder_path = __create_output_folder(args.tmp, out_folder_name)\
if args.tmp else out_folder_path
options['inter_path'] = inter_folder_path
options['log_path'] = args.log if args.log else options['inter_path']
options['nProcesses'] = len(options["process_names"].split(','))
# DosNa related options
options["dosna_backend"] = args.dosna_backend
options["dosna_engine"] = args.dosna_engine
options["dosna_connection"] = args.dosna_connection
options["dosna_connection_options"] = args.dosna_connection_options
options['checkpoint'] = args.checkpoint
return options
def __get_folder_name(in_file):
import time
MPI.COMM_WORLD.barrier()
timestamp = time.strftime("%Y%m%d%H%M%S")
MPI.COMM_WORLD.barrier()
split = in_file.split('.')
if len(split[-1].split(os.sep)) > 1:
split = in_file.split(os.sep)
name = split[-2] if split[-1] == '' else split[-1]
# if the input is a file
else:
name = os.path.basename(split[-2])
return '_'.join([timestamp, name])
def __create_output_folder(path, folder_name):
folder = os.path.join(path, folder_name)
if MPI.COMM_WORLD.rank == 0:
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def main(input_args=None):
args = __option_parser()
if input_args:
args = input_args
options = _set_options(args)
pRunner = PluginRunner if options['mode'] == 'full' else BasicPluginRunner
if options['nProcesses'] == 1:
plugin_runner = pRunner(options)
plugin_runner._run_plugin_list()
else:
try:
plugin_runner = pRunner(options)
plugin_runner._run_plugin_list()
except Exception as error:
            print(error)
traceback.print_exc(file=sys.stdout)
MPI.COMM_WORLD.Abort(1)
if __name__ == '__main__':
main()
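# Illustrative invocations (the data-file, process-list and output paths are
# hypothetical; the program name comes from prog='savu' above and every flag
# used is defined in __option_parser):
#
#   savu /data/scan_00001.nxs /data/tomo_pipeline.nxs /data/out
#   savu /data/scan_00001.nxs /data/tomo_pipeline.nxs /data/out -d /scratch/tmp -l /data/logs --verbose
#
# options['nProcesses'] in _set_options is the length of the comma-separated
# hidden -n/--names list, which defaults to "CPU0", i.e. a single process.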
| 37.4 | 79 | 0.663567 |
5f36c331ee1d56db346a9f28e5fe53ef24a4600a | 1,960 | py | Python | tempest/api_schema/compute/v3/availability_zone.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | ["Apache-2.0"] | null | null | null | tempest/api_schema/compute/v3/availability_zone.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | ["Apache-2.0"] | null | null | null | tempest/api_schema/compute/v3/availability_zone.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | ["Apache-2.0"] | null | null | null |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import availability_zone as common
base = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'availability_zone_info': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'zone_name': {'type': 'string'},
'zone_state': {
'type': 'object',
'properties': {
'available': {'type': 'boolean'}
},
'required': ['available']
},
# NOTE: Here is the difference between detail and
# non-detail
'hosts': {'type': 'null'}
},
'required': ['zone_name', 'zone_state', 'hosts']
}
}
},
'required': ['availability_zone_info']
}
}
get_availability_zone_list = copy.deepcopy(base)
get_availability_zone_list_detail = copy.deepcopy(base)
get_availability_zone_list_detail['response_body']['properties'][
'availability_zone_info']['items']['properties']['hosts'] = common.detail
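# Illustrative self-check (an addition for clarity, not used by tempest itself;
# assumes the jsonschema package is available): validate a hand-built,
# hypothetical response body against the non-detail schema defined above.
if __name__ == '__main__':
    import jsonschema
    sample_body = {
        'availability_zone_info': [{
            'zone_name': 'nova',
            'zone_state': {'available': True},
            'hosts': None,
        }]
    }
    jsonschema.validate(sample_body, get_availability_zone_list['response_body'])
    print('sample body matches the non-detail availability zone schema')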
| 36.296296 | 78 | 0.533673 |
55bb629bbc3a53f25ab8581535fe1e74529f9312 | 14,343 | py | Python | flopy/mf6/modflow/mfgwfwel.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null | flopy/mf6/modflow/mfgwfwel.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null | flopy/mf6/modflow/mfgwfwel.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | ["CC0-1.0", "BSD-3-Clause"] | null | null | null |
# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
# mf6/utils/createpackages.py
# FILE created on December 22, 2021 17:36:26 UTC
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwfwel(mfpackage.MFPackage):
"""
ModflowGwfwel defines a wel package within a gwf6 model.
Parameters
----------
model : MFModel
Model that this package is a part of. Package is automatically
added to model when it is initialized.
loading_package : bool
Do not set this parameter. It is intended for debugging and internal
processing purposes only.
auxiliary : [string]
* auxiliary (string) defines an array of one or more auxiliary variable
names. There is no limit on the number of auxiliary variables that
can be provided on this line; however, lists of information provided
in subsequent blocks must have a column of data for each auxiliary
variable name defined here. The number of auxiliary variables
detected on this line determines the value for naux. Comments cannot
be provided anywhere on this line as they will be interpreted as
auxiliary variable names. Auxiliary variables may not be used by the
package, but they will be available for use by other parts of the
program. The program will terminate with an error if auxiliary
variables are specified on more than one line in the options block.
auxmultname : string
* auxmultname (string) name of auxiliary variable to be used as
multiplier of well flow rate.
boundnames : boolean
* boundnames (boolean) keyword to indicate that boundary names may be
provided with the list of well cells.
print_input : boolean
* print_input (boolean) keyword to indicate that the list of well
information will be written to the listing file immediately after it
is read.
print_flows : boolean
* print_flows (boolean) keyword to indicate that the list of well flow
rates will be printed to the listing file for every stress period
time step in which "BUDGET PRINT" is specified in Output Control. If
there is no Output Control option and "PRINT_FLOWS" is specified,
then flow rates are printed for the last time step of each stress
period.
save_flows : boolean
* save_flows (boolean) keyword to indicate that well flow terms will be
written to the file specified with "BUDGET FILEOUT" in Output
Control.
auto_flow_reduce : double
* auto_flow_reduce (double) keyword and real value that defines the
fraction of the cell thickness used as an interval for smoothly
adjusting negative pumping rates to 0 in cells with head values less
than or equal to the bottom of the cell. Negative pumping rates are
adjusted to 0 or a smaller negative value when the head in the cell
is equal to or less than the calculated interval above the cell
bottom. AUTO_FLOW_REDUCE is set to 0.1 if the specified value is less
than or equal to zero. By default, negative pumping rates are not
reduced during a simulation.
timeseries : {varname:data} or timeseries data
* Contains data for the ts package. Data can be stored in a dictionary
containing data for the ts package with variable names as keys and
package data as values. Data just for the timeseries variable is also
acceptable. See ts package documentation for more information.
observations : {varname:data} or continuous data
* Contains data for the obs package. Data can be stored in a dictionary
containing data for the obs package with variable names as keys and
package data as values. Data just for the observations variable is
also acceptable. See obs package documentation for more information.
mover : boolean
* mover (boolean) keyword to indicate that this instance of the Well
Package can be used with the Water Mover (MVR) Package. When the
MOVER option is specified, additional memory is allocated within the
package to store the available, provided, and received water.
maxbound : integer
* maxbound (integer) integer value specifying the maximum number of
wells cells that will be specified for use during any stress period.
stress_period_data : [cellid, q, aux, boundname]
* cellid ((integer, ...)) is the cell identifier, and depends on the
type of grid that is used for the simulation. For a structured grid
that uses the DIS input file, CELLID is the layer, row, and column.
For a grid that uses the DISV input file, CELLID is the layer and
CELL2D number. If the model uses the unstructured discretization
(DISU) input file, CELLID is the node number for the cell. This
argument is an index variable, which means that it should be treated
as zero-based when working with FloPy and Python. Flopy will
automatically subtract one when loading index variables and add one
when writing index variables.
* q (double) is the volumetric well rate. A positive value indicates
recharge (injection) and a negative value indicates discharge
(extraction). If the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be obtained from
a time series by entering the time-series name in place of a numeric
value.
* aux (double) represents the values of the auxiliary variables for
each well. The values of auxiliary variables must be present for each
well. The values must be specified in the order of the auxiliary
variables specified in the OPTIONS block. If the package supports
time series and the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be obtained from
a time series by entering the time-series name in place of a numeric
value.
* boundname (string) name of the well cell. BOUNDNAME is an ASCII
character variable that can contain as many as 40 characters. If
BOUNDNAME contains spaces in it, then the entire name must be
enclosed within single quotes.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
auxiliary = ListTemplateGenerator(("gwf6", "wel", "options", "auxiliary"))
ts_filerecord = ListTemplateGenerator(
("gwf6", "wel", "options", "ts_filerecord")
)
obs_filerecord = ListTemplateGenerator(
("gwf6", "wel", "options", "obs_filerecord")
)
stress_period_data = ListTemplateGenerator(
("gwf6", "wel", "period", "stress_period_data")
)
package_abbr = "gwfwel"
_package_type = "wel"
dfn_file_name = "gwf-wel.dfn"
dfn = [
[
"header",
"multi-package",
],
[
"block options",
"name auxiliary",
"type string",
"shape (naux)",
"reader urword",
"optional true",
],
[
"block options",
"name auxmultname",
"type string",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name boundnames",
"type keyword",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name print_input",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name save_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name auto_flow_reduce",
"type double precision",
"reader urword",
"optional true",
],
[
"block options",
"name ts_filerecord",
"type record ts6 filein ts6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package ts",
"construct_data timeseries",
"parameter_name timeseries",
],
[
"block options",
"name ts6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name filein",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name ts6_filename",
"type string",
"preserve_case true",
"in_record true",
"reader urword",
"optional false",
"tagged false",
],
[
"block options",
"name obs_filerecord",
"type record obs6 filein obs6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package obs",
"construct_data continuous",
"parameter_name observations",
],
[
"block options",
"name obs6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name obs6_filename",
"type string",
"preserve_case true",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block options",
"name mover",
"type keyword",
"tagged true",
"reader urword",
"optional true",
],
[
"block dimensions",
"name maxbound",
"type integer",
"reader urword",
"optional false",
],
[
"block period",
"name iper",
"type integer",
"block_variable True",
"in_record true",
"tagged false",
"shape",
"valid",
"reader urword",
"optional false",
],
[
"block period",
"name stress_period_data",
"type recarray cellid q aux boundname",
"shape (maxbound)",
"reader urword",
],
[
"block period",
"name cellid",
"type integer",
"shape (ncelldim)",
"tagged false",
"in_record true",
"reader urword",
],
[
"block period",
"name q",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
"time_series true",
],
[
"block period",
"name aux",
"type double precision",
"in_record true",
"tagged false",
"shape (naux)",
"reader urword",
"optional true",
"time_series true",
],
[
"block period",
"name boundname",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
"optional true",
],
]
def __init__(
self,
model,
loading_package=False,
auxiliary=None,
auxmultname=None,
boundnames=None,
print_input=None,
print_flows=None,
save_flows=None,
auto_flow_reduce=None,
timeseries=None,
observations=None,
mover=None,
maxbound=None,
stress_period_data=None,
filename=None,
pname=None,
parent_file=None,
):
super().__init__(
model, "wel", filename, pname, loading_package, parent_file
)
# set up variables
self.auxiliary = self.build_mfdata("auxiliary", auxiliary)
self.auxmultname = self.build_mfdata("auxmultname", auxmultname)
self.boundnames = self.build_mfdata("boundnames", boundnames)
self.print_input = self.build_mfdata("print_input", print_input)
self.print_flows = self.build_mfdata("print_flows", print_flows)
self.save_flows = self.build_mfdata("save_flows", save_flows)
self.auto_flow_reduce = self.build_mfdata(
"auto_flow_reduce", auto_flow_reduce
)
self._ts_filerecord = self.build_mfdata("ts_filerecord", None)
self._ts_package = self.build_child_package(
"ts", timeseries, "timeseries", self._ts_filerecord
)
self._obs_filerecord = self.build_mfdata("obs_filerecord", None)
self._obs_package = self.build_child_package(
"obs", observations, "continuous", self._obs_filerecord
)
self.mover = self.build_mfdata("mover", mover)
self.maxbound = self.build_mfdata("maxbound", maxbound)
self.stress_period_data = self.build_mfdata(
"stress_period_data", stress_period_data
)
self._init_complete = True
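# Illustrative usage sketch (assumes a standard flopy installation; the grid
# dimensions, cellid and pumping rate are made-up values). In a user script one
# would typically write something like:
#
#   import flopy
#   sim = flopy.mf6.MFSimulation(sim_name="demo", sim_ws=".")
#   flopy.mf6.ModflowTdis(sim, nper=1)
#   gwf = flopy.mf6.ModflowGwf(sim, modelname="demo")
#   flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=10, ncol=10)
#   wel = flopy.mf6.ModflowGwfwel(
#       gwf,
#       print_input=True,
#       save_flows=True,
#       maxbound=1,
#       stress_period_data={0: [[(0, 4, 4), -100.0]]},
#   )
#
# stress_period_data is keyed by zero-based stress period, and cellid is a
# zero-based (layer, row, column) tuple for a DIS grid, as described in the
# class docstring above.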
| 36.776923 | 79 | 0.571429 |
8d9bb0b2df94b0ae2d6ce456be8967d71ad89c86 | 51 | py | Python | torchtraining/functional/metrics/__init__.py | szymonmaszke/torchtraining | 1ddf169325b7239d6d6686b20072a406b69a0180 | ["MIT"] | 3 | 2020-08-26T06:11:58.000Z | 2020-08-27T08:11:15.000Z | torchtraining/metrics/__init__.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | ["MIT"] | 1 | 2020-08-25T19:19:43.000Z | 2020-08-25T19:19:43.000Z | torchtraining/metrics/__init__.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | ["MIT"] | 1 | 2021-04-15T18:55:57.000Z | 2021-04-15T18:55:57.000Z |
from . import classification, distance, regression
| 25.5 | 50 | 0.823529 |
2d48d7cd829ea6d840d4f54a0c19c58b4e552344 | 18,058 | py | Python | labs/lab10/editor/formatter.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | 8 | 2020-07-28T11:10:49.000Z | 2021-05-29T15:27:17.000Z | 34-Distributed Data(optional)/lab12/lab12/editor/formatter.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | ["MIT"] | 59 | 2019-02-16T10:36:51.000Z | 2019-04-02T21:50:29.000Z | 34-Distributed Data(optional)/lab12/lab12/editor/formatter.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | ["MIT"] | 1 | 2019-04-19T21:20:59.000Z | 2019-04-19T21:20:59.000Z |
from abc import ABC
from functools import lru_cache
from typing import List, Tuple, Type, Union
import lexer as lexer
from format_parser import FormatAtom, FormatComment, FormatList, Formatted, get_expression
LINE_LENGTH = 50
MAX_EXPR_COUNT = 10
MAX_EXPR_LEN = 30
INDENT = 4
DEFINE_VALS = ["define", "define-macro"]
DECLARE_VALS = ["lambda", "mu"]
SHORTHAND = {"quote": "'", "quasiquote": "`", "unquote": ",", "unquote-splicing": ",@", "variadic": "."}
MULTILINE_VALS = ["let", "cond", "if"]
FREE_TOKENS = ["if", "define", "define-macro", "mu", "lambda"]
OPEN_PARENS = ["(", "["]
CLOSE_PARENS = [")", "]"]
CACHE_SIZE = 2 ** 8
def prettify(strings: List[str], javastyle: bool = False) -> str:
out = []
for i, string in enumerate(strings):
if not string.strip():
continue
out.extend(prettify_single(string, javastyle))
raw_out = []
for expr in out:
if expr.startswith(";"):
raw_out.append(expr)
else:
raw_out.append(expr)
raw_out.append("\n")
raw_out.append("\n")
while raw_out and raw_out[-1] == "\n":
raw_out.pop()
return "".join(raw_out)
@lru_cache(CACHE_SIZE)
def prettify_single(string: str, javastyle: bool) -> List[str]:
Formatter.set_javastyle(javastyle)
out = []
buff = lexer.TokenBuffer([string], True)
while not buff.done:
expr = get_expression(buff)
out.append(ExpressionFormatter.format(expr, LINE_LENGTH).stringify())
return out
class OptimalFormattingReached(Exception):
pass
class MatchFailure(Exception):
pass
class WeakMatchFailure(MatchFailure):
pass
class StrongMatchFailure(MatchFailure):
...
class FormatSeq:
def __init__(self):
self.left: FormatOp = None
self.right: FormatOp = None
self.active = True
self.line_lengths = [0]
self.max_line_len = 0
self.cost = 0
def __add__(self, other):
if other is None:
return self
if isinstance(other, FormatSeq):
return other.__radd__(self)
return NotImplemented
def __radd__(self, other: 'FormatSeq'):
if other is None:
return self
if not other.active:
raise Exception("Attempting to manipulate inactive seqs!")
if not self.active:
raise Exception("???")
other.right.next = self.left
other.active = False
self.left = other.left
self.line_lengths[0] += other.line_lengths.pop()
self.line_lengths = other.line_lengths + self.line_lengths
self.max_line_len = max(self.max_line_len, other.max_line_len, *self.line_lengths)
if len(self.line_lengths) > 1:
self.line_lengths = [self.line_lengths[0], self.line_lengths[-1]]
return self
def contains_newline(self):
return len(self.line_lengths) > 1
def stringify(self):
pos = self.left
out = []
indent_level = 0
while pos is not None:
if isinstance(pos, _Token):
out.append(pos.value)
if pos.value == "\n":
out.append(" " * indent_level)
elif isinstance(pos, _ChangeIndent):
indent_level += pos.level
else:
raise NotImplementedError("unable to stringify " + str(type(pos)))
pos = pos.next
return "".join(out)
class FormatOp:
def __init__(self):
self.next = None
class _Token(FormatOp):
def __init__(self, value):
super().__init__()
assert isinstance(value, str)
self.value = value
class Token(FormatSeq):
def __init__(self, value):
super().__init__()
self.left = self.right = _Token(value)
self.max_line_len = self.line_lengths[0] = len(value)
class _ChangeIndent(FormatOp):
def __init__(self, level):
super().__init__()
self.level = level
class ChangeIndent(FormatSeq):
def __init__(self, level):
super().__init__()
self.left = self.right = _ChangeIndent(level)
class Newline(Token):
def __init__(self):
super().__init__("\n")
self.max_line_len = self.line_lengths[0] = 0
self.line_lengths.append(0)
class Space(Token):
def __init__(self):
super().__init__(" ")
class Formatter(ABC):
javastyle = False
@staticmethod
def format(expr: Formatted, remaining: int) -> FormatSeq:
raise NotImplementedError()
@staticmethod
def set_javastyle(javastyle: bool):
Formatter.javastyle = javastyle
class SpecialFormFormatter(Formatter, ABC):
@classmethod
def assert_form(cls, expr: Formatted, form: Union[str, List[str]]):
if isinstance(form, list):
for elem in form:
try:
cls.assert_form(expr, elem)
except WeakMatchFailure:
continue
else:
return
raise WeakMatchFailure
if not isinstance(expr, FormatList):
raise WeakMatchFailure("Special form must be list, not atom.")
if not expr.contents:
raise WeakMatchFailure("Special form must be list, not nil.")
if not isinstance(expr.contents[0], FormatAtom):
raise WeakMatchFailure("Special form must begin with a Symbol.")
if not expr.contents[0].value == form:
raise WeakMatchFailure("Call expression does not match desired special form.")
# if expr.last:
# raise StrongMatchFailure("Special form must not be dotted.")
@classmethod
def match_form(cls, expr: Formatted, form: Union[str, List[str]]):
try:
cls.assert_form(expr, form)
except WeakMatchFailure:
return False
else:
return True
@classmethod
def is_multiline(cls, expr: Formatted):
return any(cls.match_form(expr, form) for form in MULTILINE_VALS)
class AlignedCondFormatter(SpecialFormFormatter):
class Clause(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int, max_pred_len: int = 0) -> FormatSeq:
if isinstance(expr, FormatComment):
return CommentFormatter.format(expr)
else:
out = Token(expr.prefix) + Token(expr.open_paren)
inlined_pred = InlineFormatter.format(expr.contents[0])
pred_len = len(expr.prefix) + inlined_pred.max_line_len
out += inlined_pred
out += Token(" " * (max_pred_len - pred_len)) + Space()
out += InlineFormatter.format(expr.contents[1])
out += Token(expr.close_paren)
return out
@staticmethod
def pred_len(expr: Formatted):
if isinstance(expr, FormatAtom):
raise WeakMatchFailure("Cond clause should not be FormatAtom")
elif isinstance(expr, FormatComment):
return 0
else:
if len(expr.contents) != 2:
raise WeakMatchFailure("Cannot auto-align expr")
pred, val = expr.contents
inlined_pred = InlineFormatter.format(pred)
return inlined_pred.max_line_len
@classmethod
def format(cls, expr: Formatted, remaining) -> FormatSeq:
cls.assert_form(expr, "cond")
max_pred_len = 0
for clause in expr.contents[1:]:
max_pred_len = max(max_pred_len, cls.Clause.pred_len(clause))
out = Token(expr.open_paren) + Token("cond") + Space() + ChangeIndent(2) + Newline()
out += rest_format(expr.contents[1:], -1, max_pred_len,
formatter=cls.Clause, indent_level=2, close_paren=expr.close_paren)
return out
class MultilineCondFormatter(SpecialFormFormatter):
class Clause(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int) -> FormatSeq:
if isinstance(expr, FormatList):
return NoHangingListFormatter.format(expr, remaining)
else:
return ExpressionFormatter.format(expr, remaining)
@classmethod
def format(cls, expr: Formatted, remaining) -> FormatSeq:
cls.assert_form(expr, "cond")
out = Token(expr.open_paren) + Token("cond") + Space() + ChangeIndent(2) + Newline()
out += rest_format(expr.contents[1:], remaining - 2,
formatter=cls.Clause, indent_level=2, close_paren=expr.close_paren)
return out
class LetFormatter(SpecialFormFormatter):
class LetHandler(Formatter):
def __init__(self):
self.bindings_next = True
def format(self, expr: Formatted, remaining: int) -> FormatSeq:
if isinstance(expr, FormatList) and self.bindings_next:
self.bindings_next = False
out = NoHangingListFormatter.format(expr, remaining)
out += ChangeIndent(-3)
return out
else:
return ExpressionFormatter.format(expr, remaining)
@classmethod
def format(cls, expr: Formatted, remaining: int) -> FormatSeq:
cls.assert_form(expr, "let")
out = Token(expr.open_paren) + Token("let") + Space() + ChangeIndent(5)
let_handler = cls.LetHandler()
out += rest_format(expr.contents[1:], remaining - 6,
formatter=let_handler, indent_level=2, close_paren=expr.close_paren)
if let_handler.bindings_next:
raise WeakMatchFailure("Let statement with too few arguments")
return out
class ProcedureFormatter(SpecialFormFormatter):
class ProcedureHandler(Formatter):
def __init__(self, indent_level):
self.formals_next = True
self.indent_level = indent_level
def format(self, expr: Formatted, remaining: int) -> FormatSeq:
out = ExpressionFormatter.format(expr, remaining)
if isinstance(expr, FormatList) and self.formals_next:
self.formals_next = False
out += ChangeIndent(2 - self.indent_level)
return out
@classmethod
def format(cls, expr: Formatted, remaining: int) -> FormatSeq:
cls.assert_form(expr, DEFINE_VALS + DECLARE_VALS)
indent_level = 2 + len(expr.contents[0].value)
out = Token(expr.open_paren) + Token(expr.contents[0].value) + Space() + ChangeIndent(indent_level)
procedure_handler = cls.ProcedureHandler(indent_level)
out += rest_format(expr.contents[1:], remaining - indent_level,
formatter=procedure_handler, indent_level=2, close_paren=expr.close_paren)
if procedure_handler.formals_next:
raise WeakMatchFailure("Formals not specified")
return out
class AtomFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int = None) -> FormatSeq:
if not isinstance(expr, FormatAtom):
raise WeakMatchFailure("expr is not atomic")
return Token(expr.prefix + expr.value)
class InlineFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int = None) -> FormatSeq:
if isinstance(expr, FormatComment):
raise WeakMatchFailure("Cannot inline-format a comment")
if isinstance(expr, FormatAtom):
return AtomFormatter.format(expr, remaining)
if SpecialFormFormatter.is_multiline(expr):
raise WeakMatchFailure("Cannot inline-format a multiline expr")
formatted_exprs = [InlineFormatter.format(elem) for elem in expr.contents]
out = Token(expr.prefix) + Token(expr.open_paren)
for formatted_expr in formatted_exprs:
out += formatted_expr
if formatted_expr is not formatted_exprs[-1]:
out += Space()
out += Token(expr.close_paren)
return out
class ListFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int) -> FormatSeq:
if not isinstance(expr, FormatList):
raise WeakMatchFailure("expr is not a list")
return find_best(expr, [InlineFormatter, PrefixedListFormatter, CallExprFormatter, NoHangingListFormatter],
remaining)
class CallExprFormatter(Formatter):
@staticmethod
def format(expr: FormatList, remaining: int) -> FormatSeq:
assert isinstance(expr, FormatList)
if len(expr.contents) <= 1:
raise WeakMatchFailure("Call expr must have at least 2 arguments, otherwise handle using DataListFormatter")
if expr.prefix:
raise WeakMatchFailure("Call expr cannot be prefixed")
if not isinstance(expr.contents[0], FormatAtom):
raise WeakMatchFailure("Unable to inline first two arguments, fallback to DataListFormatter")
return find_best(expr, [
AlignedCondFormatter,
MultilineCondFormatter,
LetFormatter,
ProcedureFormatter,
DefaultCallExprFormatter], remaining)
class PrefixedListFormatter(Formatter):
@staticmethod
def format(expr: FormatList, remaining: int):
assert isinstance(expr, FormatList)
if not expr.prefix:
raise WeakMatchFailure("Expr is not prefixed")
with expr.hold_prefix() as prefix:
if prefix == "`":
ret = ListFormatter.format(expr, remaining - 1)
else:
ret = DataFormatter.format(expr, remaining - 1)
return Token(prefix) + ChangeIndent(1) + ret + ChangeIndent(-1)
class DefaultCallExprFormatter(Formatter):
@staticmethod
def format(expr: FormatList, remaining: int) -> FormatSeq:
operator = expr.contents[0]
assert isinstance(operator, FormatAtom)
indent_level = len(operator.value) + 2
out = Token(expr.open_paren)
out += AtomFormatter.format(operator)
out += ChangeIndent(indent_level) + Space()
out += rest_format(expr.contents[1:], remaining - indent_level,
indent_level=indent_level, close_paren=expr.close_paren)
return out
class DataFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int) -> FormatSeq:
if isinstance(expr, FormatComment):
return CommentFormatter.format(expr)
elif isinstance(expr, FormatAtom):
return AtomFormatter.format(expr)
else:
return NoHangingListFormatter.format(expr, remaining, DataFormatter)
class NoHangingListFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int, callback: Type[Formatter] = None) -> FormatSeq:
if callback is None:
callback = ExpressionFormatter
if expr.prefix:
raise WeakMatchFailure("Cannot format prefixed datalist")
out = Token(expr.open_paren) + ChangeIndent(1)
out += rest_format(expr.contents, remaining - 1,
formatter=callback, indent_level=1, close_paren=expr.close_paren)
return out
class CommentFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int = None) -> FormatSeq:
if not isinstance(expr, FormatComment):
raise WeakMatchFailure("Expr is not a comment")
leading_space = "" if expr.value.startswith(" ") else " "
return Token(expr.prefix + ";" + leading_space + expr.value)
class ExpressionFormatter(Formatter):
@staticmethod
def format(expr: Formatted, remaining: int) -> FormatSeq:
candidates = [AtomFormatter, ListFormatter, CommentFormatter]
return find_best(expr, candidates, remaining)
class Best:
def __init__(self, remaining):
self.curr_best = None
self.curr_cost = None
self.remaining = remaining
def heuristic(self, chain: FormatSeq) -> int:
return max(0, chain.max_line_len - 50) + chain.cost
def add(self, formatted: FormatSeq):
cost = self.heuristic(formatted)
if self.curr_cost is None or cost < self.curr_cost:
self.curr_best = formatted
self.curr_cost = cost
if cost == 0:
raise OptimalFormattingReached()
def get_best(self) -> FormatSeq:
assert self.curr_best is not None
return self.curr_best
def find_best(raw: Formatted, candidates: List[Type[Formatter]], remaining) -> FormatSeq:
best = Best(remaining)
for candidate in candidates:
try:
best.add(candidate.format(raw, remaining))
except WeakMatchFailure as e:
continue
except StrongMatchFailure:
# TODO: Warn about potentially invalid special form
continue
except OptimalFormattingReached:
return best.get_best()
return best.get_best()
def rest_format(exprs: List[Formatted],
*args,
formatter: Union[Formatter, Type[Formatter]] = ExpressionFormatter,
indent_level: int,
close_paren: str) -> Tuple[FormatSeq, bool]:
out = None
i = 0
while i != len(exprs):
curr_expr = exprs[i]
i += 1
formatted_expr = formatter.format(curr_expr, *args)
if "not formatted_expr.contains_newline()" and i != len(exprs) \
and not isinstance(curr_expr, FormatComment) \
and isinstance(exprs[i], FormatComment) \
and exprs[i].allow_inline:
inline_comment = exprs[i]
formatted_expr += Space() + CommentFormatter.format(inline_comment)
i += 1
out += formatted_expr if i == len(exprs) else formatted_expr + Newline()
ends_with_comment = exprs and isinstance(exprs[-1], FormatComment)
out += ChangeIndent(-indent_level)
if ends_with_comment or Formatter.javastyle:
out += Newline()
out += Token(close_paren)
return out
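# Illustrative calls (assume the lab's lexer and format_parser modules are
# importable, as imported at the top of this file; the Scheme snippets are
# made up):
#
#   prettify(["(define (square x) (* x x))"])             # -> reformatted str
#   prettify(["; a comment", "(+ 1 2)"], javastyle=True)  # -> reformatted str
#
# With javastyle=True, rest_format above emits the closing paren of a
# multi-line form on its own line.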
| 33.502783 | 120 | 0.622328 |
15672cf7391b56a02ae52c4b872faff8c5e2b1c1 | 4,741 | py | Python | spec/test/stored_queries/test_ws.py | dakotablair/relation_engine | afd0a65fbf9e5da80a63264762111e8ee679795c | ["MIT"] | null | null | null | spec/test/stored_queries/test_ws.py | dakotablair/relation_engine | afd0a65fbf9e5da80a63264762111e8ee679795c | ["MIT"] | null | null | null | spec/test/stored_queries/test_ws.py | dakotablair/relation_engine | afd0a65fbf9e5da80a63264762111e8ee679795c | ["MIT"] | null | null | null |
"""
Tests for workspace stored queries under the ws* namespace
These tests run within the re_api docker image, and require access to the ArangoDB, auth, and workspace images.
"""
import unittest
import json
import requests
from spec.test.helpers import get_config, create_test_docs, check_spec_test_env
_CONF = get_config()
def _ws_obj(wsid, objid, ver, is_public=True):
"""Create data for a dummy test workspace obj"""
return {
'_key': ':'.join((str(n) for n in (wsid, objid, ver))),
'name': 'obj',
'workspace_id': wsid,
'object_id': objid,
'version': ver,
'hash': 'x',
'size': 0,
'epoch': 0,
'deleted': False,
'is_public': is_public,
}
class TestWs(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Create all test data.
"""
check_spec_test_env()
ws_object_version = [
_ws_obj(1, 1, 1), # root/origin object
_ws_obj(1, 2, 1), # copy object
_ws_obj(1, 3, 1), # provenance object
_ws_obj(1, 4, 1), # reference object
_ws_obj(1, 5, 1, is_public=False), # private copy obj
_ws_obj(1, 6, 1, is_public=False), # private prov obj
_ws_obj(1, 7, 1, is_public=False), # private ref obj
]
create_test_docs('ws_object_version', ws_object_version)
ws_type_version = [{'_key': 'Module.Type1-1.0'}]
create_test_docs('ws_type_version', ws_type_version)
ws_obj_instance_of_type = [
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_type_version/Module.Type1-1.0'},
{'_from': 'ws_object_version/1:2:1', '_to': 'ws_type_version/Module.Type1-1.0'},
{'_from': 'ws_object_version/1:3:1', '_to': 'ws_type_version/Module.Type1-1.0'},
{'_from': 'ws_object_version/1:4:1', '_to': 'ws_type_version/Module.Type1-1.0'},
]
create_test_docs('ws_obj_instance_of_type', ws_obj_instance_of_type)
ws_prov_descendant_of = [
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:3:1'},
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:6:1'},
]
create_test_docs('ws_prov_descendant_of', ws_prov_descendant_of)
ws_refers_to = [
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:4:1'},
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:7:1'},
]
create_test_docs('ws_refers_to', ws_refers_to)
ws_copied_from = [
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:2:1'},
{'_from': 'ws_object_version/1:1:1', '_to': 'ws_object_version/1:5:1'},
]
create_test_docs('ws_copied_from', ws_copied_from)
def test_fetch_related_data_valid(self):
"""
Test for the basic happy path.
This also covers the case of private-scope object results, which will be hidden from results.
"""
resp = requests.post(
_CONF['re_api_url'] + '/api/v1/query_results',
params={'stored_query': 'ws_fetch_related_data', 'show_public': True},
data=json.dumps({'obj_key': '1:1:1'})
).json()
self.assertEqual(resp['count'], 1)
self.assertEqual(resp['has_more'], False)
res = resp['results'][0]
# Check the root object results
self.assertEqual(res['obj']['_key'], '1:1:1')
self.assertEqual(res['obj_type']['_key'], 'Module.Type1-1.0')
# Check the copy results
self.assertEqual(res['copies']['count'], 1)
self.assertEqual(len(res['copies']['data']), 1)
self.assertEqual(res['copies']['data'][0]['data']['_id'], 'ws_object_version/1:2:1')
self.assertEqual(res['copies']['data'][0]['hops'], 1)
self.assertEqual(res['copies']['data'][0]['type']['_id'], 'ws_type_version/Module.Type1-1.0')
# Check the provenance results
self.assertEqual(res['prov']['count'], 1)
self.assertEqual(len(res['prov']['data']), 1)
self.assertEqual(res['prov']['data'][0]['data']['_id'], 'ws_object_version/1:3:1')
self.assertEqual(res['prov']['data'][0]['hops'], 1)
self.assertEqual(res['prov']['data'][0]['type']['_id'], 'ws_type_version/Module.Type1-1.0')
# Check the ref results
self.assertEqual(res['refs']['count'], 1)
self.assertEqual(len(res['refs']['data']), 1)
self.assertEqual(res['refs']['data'][0]['data']['_id'], 'ws_object_version/1:4:1')
self.assertEqual(res['refs']['data'][0]['hops'], 1)
self.assertEqual(res['refs']['data'][0]['type']['_id'], 'ws_type_version/Module.Type1-1.0')
| 43.1 | 111 | 0.593968 |
72d095e100b4fdc7bb5236094f57f1b329d633dc | 176 | py | Python | mall/apps/orders/urls.py | DanaLee1990/meiduo | a2342cfd35829b7ef40ef3c1f1731eb95658d3e8 | ["MIT"] | null | null | null | mall/apps/orders/urls.py | DanaLee1990/meiduo | a2342cfd35829b7ef40ef3c1f1731eb95658d3e8 | ["MIT"] | null | null | null | mall/apps/orders/urls.py | DanaLee1990/meiduo | a2342cfd35829b7ef40ef3c1f1731eb95658d3e8 | ["MIT"] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^places/$',views.OrderSettlementView.as_view()),
url(r'^$', views.OrderView.as_view()),
]
| 17.6 | 58 | 0.676136 |
1d40362f2c426a0320ab9da41875c03522df29ca | 1,486 | py | Python | leetcode/0840_magic_squares_in_grid.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | ["BSD-2-Clause"] | null | null | null | leetcode/0840_magic_squares_in_grid.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | ["BSD-2-Clause"] | null | null | null | leetcode/0840_magic_squares_in_grid.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | ["BSD-2-Clause"] | null | null | null |
"""
A 3 x 3 magic square is a 3 x 3 grid filled with distinct numbers from 1 to 9 such that each row, column, and both diagonals all
have the same sum. Given a grid of integers, how many 3 x 3 "magic square" subgrids are there? (Each subgrid is contiguous).
Example 1:
Input: [[4,3,8,4],
[9,5,1,9],
[2,7,6,2]]
Output: 1
Explanation:
The following subgrid is a 3 x 3 magic square:
4 3 8
9 5 1
2 7 6
while this one is not:
3 8 4
5 1 9
7 6 2
In total, there is only one magic square inside the given grid.
Note:
1. 1 <= grid.length <= 10
2. 1 <= grid[0].length <= 10
3. 0 <= grid[i][j] <= 15
"""
class Solution:
def numMagicSquaresInside(self, grid):
def checker(a, b, c, d, e, f, g, h, i):
"""http://www.mathematische-basteleien.de/magsquare.htm"""
return (
e == 5 and
sorted([a, b, c, d, e, f, g, h, i]) == [1, 2, 3, 4, 5, 6, 7, 8, 9] and
(a+b+c == d+e+f == g+h+i == a+d+g == b+e+h == c+f+i == a+e+i == c+e+g == 15)
)
return sum(
checker(
grid[r][c], grid[r][c+1], grid[r][c+2],
grid[r+1][c], grid[r+1][c+1], grid[r+1][c+2],
grid[r+2][c], grid[r+2][c+1], grid[r+2][c+2]
) for c in range(len(grid[0]) - 2) for r in range(len(grid) - 2)
)
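# Quick check against the example in the docstring above (the grid is taken
# directly from that example):
if __name__ == "__main__":
    grid = [[4, 3, 8, 4],
            [9, 5, 1, 9],
            [2, 7, 6, 2]]
    print(Solution().numMagicSquaresInside(grid))  # expected: 1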
| 32.304348 | 129 | 0.466353 |
ec449aae4e9d54b4ed82b7aab8e4cca5345aa412 | 23,418 | py | Python | isilon_capacity_calc_py3.py | adamgweeks/Isilon-PowerScale-capacity-calculator | 04df4604677e9f3faf409d27b2c55d40aec91b53 | ["MIT"] | null | null | null | isilon_capacity_calc_py3.py | adamgweeks/Isilon-PowerScale-capacity-calculator | 04df4604677e9f3faf409d27b2c55d40aec91b53 | ["MIT"] | 1 | 2020-11-10T13:12:23.000Z | 2020-11-10T13:12:23.000Z | isilon_capacity_calc_py3.py | adamgweeks/Isilon-PowerScale-capacity-calculator | 04df4604677e9f3faf409d27b2c55d40aec91b53 | ["MIT"] | null | null | null |
# Python script to calc Isilon file space usage
# written by Adam.Weeks@dell.com
# unofficial and NOT supported by Dell Technologies/EMC/Isilon!
# example usage: python isilon_capacity_calc_py3.py /Users/user1/Documents/ -s 9 -p N+2
# see https://github.com/adamgweeks/Isilon-capacity-calculator
#
# for Python 3!
from datetime import datetime # get script start time
startTime = datetime.now() # script timed as this could take a while!
import math
#take in cmd line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="source directory (will scan this dir and all subdirs from this point)")
parser.add_argument("--node_pool_size","-s", help="the node pool size (number of nodes)",type=int,required=True)
parser.add_argument("--protection","-p", help="data protection level, defaults to: N+2:1",default="N+2:1",required=True)
parser.add_argument("--units","-u", help="output data units (KB,MB,TB,PB,H), default=H (H=human/auto sizing)",default="H")
parser.add_argument("--verbose","-v", help="show individual file size comparisson",action="store_true")
parser.add_argument("--metadata_stuffer_size","-mss", help="specify the estimated additional metadata overhead (ADS etc)",type=int,default=3584)
parser.add_argument("--csv","-c", help="verbose output as CSV file",action="store_true")
parser.add_argument("--gen6","-g6", help="GEN 6 mode",action="store_true")
# human filesizing function
def human_size(size_in_b):
out_size=float()
out_size=(size_in_b/(1024*1024*1024*1024*1024))
if out_size>=1:
#units="PB"
output=[out_size,'PB']
return(output)
else:
out_size=(size_in_b/(1024*1024*1024*1024))
if out_size>=1:
#units="TB"
output=[out_size,'TB']
return(output)
#print "outsize is ",out_size,units
else:
out_size=(size_in_b/(1024*1024*1024))
if out_size>=1:
output=[out_size,'GB']
return(output)
else:
out_size=(size_in_b/(1024*1024))
if out_size>=1:
output=[out_size,'MB']
return(output)
else:
output=[size_in_b/1024,'KB']
return(output)
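# Behaviour of human_size, for reference (values worked out from the branches
# above, not produced by running the script):
#   human_size(3 * 1024**4)  -> [3.0, 'TB']
#   human_size(512 * 1024)   -> [512.0, 'KB']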
#progress bar function
def progress(end_val, bar_length,prog):
percent = float(prog) / end_val
hashes = '#' * int(round(percent * bar_length))
spaces = ' ' * (bar_length - len(hashes))
if(prog==end_val):
sys.stdout.write("\rPercent: [{0}] Done!".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.write("({} of {} files)".format(prog,end_val))
else:
sys.stdout.write("\rPercent: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.write("({} of {} files)".format(prog,end_val))
sys.stdout.flush()
#setup the vars needed for calculations
args = parser.parse_args()
dirname=args.directory
protection_string=args.protection
node_pool_size=args.node_pool_size
meta_stuffer=args.metadata_stuffer_size
data_units=args.units
verbose=args.verbose
csv=args.csv
gen6=args.gen6
total_empty_files=0
total_small_files=0
total_partial_files=0
total_perfect_files=0
total_large_files=0
block_size=0
total_filesize_small_isilon=0
total_filesize_partial_isilon=0
total_filesize_perfect_isilon=0
total_filesize_large_isilon=0
total_filesize_small_orig=0
total_filesize_partial_orig=0
total_filesize_perfect_orig=0
total_filesize_large_orig=0
mbig_files=0
msmall_files=0
if csv==True:
verbose=True
#translate output units into divisible number (from bytes to x units)
data_units=data_units.upper()
if data_units=="KB":
odata_units=""
data_divider=1024
elif data_units=="MB":
odata_units=""
data_divider=1024*1024
elif data_units=="GB":
odata_units=""
data_divider=1024*1024*1024
elif data_units=="TB":
odata_units=""
data_divider=1024*1024*1024*1024
elif data_units=="PB":
odata_units=""
data_divider=1024*1024*1024*1024*1024
elif data_units=="H":
odata_units="H"
data_divider=1
else :
print("Data units size not recognised")
exit()
if gen6==True: #GEN 6 uses a smaller 10 node ideal disk pool size, so has a tendency to restrict the stripe width more than previous generations
#see http://isilon-additonal.info - Disk Pools for more info.
#check for even number of nodes and that we have at least 4!
if node_pool_size<4:
print("Error! Minumum of 4 nodes required for GEN 6 clusters!")
exit()
if node_pool_size % 2 != 0:
print("Error! Node pool must have an even number of nodes in GEN 6 clusters!")
exit()
if node_pool_size>20:
pool_count=math.floor(node_pool_size/10)#how many disk pools will we have?
rounded=node_pool_size-(pool_count*10)#is there a leftover from dividing into 10 node disk pools?
node_pool_size=rounded+10
#print "disk pool size:", node_pool_size
#translate requested protection string into meaning for script
protection_string=protection_string.lower()
if protection_string=="n+1":
requested_protection=1
stripe_requested=True
elif protection_string=="n+2":
requested_protection=2
stripe_requested=True
elif protection_string=="n+3":
requested_protection=3
stripe_requested=True
elif protection_string=="n+4":
requested_protection=4
stripe_requested=True
elif protection_string=="n+2:1":
requested_protection=2
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="n+3:1":
requested_protection=3
stripe_requested=True
node_pool_size=(node_pool_size * 3)
elif protection_string=="n+4:1":
requested_protection=4
stripe_requested=True
node_pool_size=(node_pool_size * 4)
elif protection_string=="n+3:11":
requested_protection=3
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="n+4:2":
requested_protection=4
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="2x":
requested_protection=2
stripe_requested=False
elif protection_string=="n+1n":
requested_protection=1
stripe_requested=True
elif protection_string=="n+2n":
requested_protection=2
stripe_requested=True
elif protection_string=="n+3n":
requested_protection=3
stripe_requested=True
elif protection_string=="n+4n":
requested_protection=4
stripe_requested=True
elif protection_string=="n+2d:1n":
requested_protection=2
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="n+3d:1n":
requested_protection=3
stripe_requested=True
node_pool_size=(node_pool_size * 3)
elif protection_string=="n+4d:1n":
requested_protection=4
stripe_requested=True
node_pool_size=(node_pool_size * 4)
elif protection_string=="n+3d:1d1n":
requested_protection=3
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="n+4d:2n":
requested_protection=4
stripe_requested=True
node_pool_size=(node_pool_size * 2)
elif protection_string=="2x":
requested_protection=2
stripe_requested=False
elif protection_string=="3x":
requested_protection=3
stripe_requested=False
elif protection_string=="4x":
requested_protection=4
stripe_requested=False
elif protection_string=="5x":
requested_protection=5
stripe_requested=False
elif protection_string=="6x":
requested_protection=6
stripe_requested=False
elif protection_string=="7x":
requested_protection=7
stripe_requested=False
elif protection_string=="8x":
requested_protection=8
stripe_requested=False
else:
print("unrecognised protection type")
exit()
#setup vars used later in script
total=0
filesizes=[]
filenames=[]
total_size=0
total_size=float(total_size)
total_original_size=0
total_original_size=float(total_original_size)
t_total=0
import os
import sys
#do some sanity checks on given arguments
#check if DIR exists
if os.path.isdir(dirname) is False:
print("Error! directory:'",dirname,"' doesn't appear to exist.")
exit()
#check if directory is readable
if os.access(dirname, os.R_OK):
print("You are able to read the ",dirname," dir")
else:
print("Error! dir:",dirname," is not readable.")
exit()
#if the node pool size is greater than the max stripe size, limit it TO the maximum stripe size
if (node_pool_size - requested_protection)>16:
node_pool_size=(16 + requested_protection)
#check striping will work with the node pool size given
if stripe_requested==True:
#valid_min_size=(requested_protection+1)+requested_protection #could have used easier logic (2 x RP + 1) but wanted to match more to the human logic used (Must be enough nodes for more DUs than FECs).
valid_min_size=requested_protection*2 #Now the protection only needs to be equal DUs to FECs (in newer versions of OneFS, I believe it's 7.1 and higher)
if node_pool_size<valid_min_size:
print("Node pool is too small for requested protection to work!")
exit()
i=0 #ready for progress function
polear=['/','|','\\','-'] #ready for showing the metadata read is still working!
polepos=0
if csv==False:
print("Reading metadata...")
metaTime = datetime.now() #timing how long the metadata read took
files_to_process=0# for progress indicator, so we know the total number of files later
dirs_to_process=0 # for counting inodes (to indicate metadata size)
for root, dirs, files in os.walk(dirname): #go and retrieve a list of all the files in the given DIR
for dir in dirs:
dirpath = os.path.join(root, dir)
if os.path.isdir(dirpath): # check this is a DIR (to count the inodes)
dirs_to_process=dirs_to_process+1
for filename in files:
if csv==False:
polepos=polepos+1
if (polepos>3):
polepos=0
pole=polear[polepos]
sys.stdout.write("\r{0}".format(pole))
sys.stdout.flush()
filepath = os.path.join(root, filename)
if os.path.isfile(filepath): # check this is a file (i.e. not a link)
files_to_process=files_to_process+1 # used later for progress bar
file_size=os.path.getsize(filepath)
filesizes.append(file_size) # add to file size for this file to the list
if(file_size>10485760):
mbig_files=mbig_files+1 # if the file is over 10MB it will get an 8KB inode (rule of thumb from https://community.emc.com/thread/178281?start=15&tstart=0)
else:
msmall_files=msmall_files+1
#filesizes.append((os.stat(filepath).st_blocks * 512)) #new alternative sizing, to match disk blocks size on Isilon (and most disks/OS configs)
if verbose==True:
filenames.append(filename)
sys.stdout.write("\r") # clear line used for the 'moving line'
sys.stdout.flush()
# change to numbers to process (dirs+1) files as is:
dirmcount = dirs_to_process
#filemcount = files_to_process
if stripe_requested:
dirmcount = dirmcount * (requested_protection + 2) # DIRs get an extra inode mirror by default
mbig_files=mbig_files * (requested_protection + 1) # metadata is always mirrored, but we have to mirror again if it's striped (to match the striping protection level)
msmall_files=msmall_files * (requested_protection + 1) # metadata is always mirrored, but we have to mirror again if it's striped (to match the striping protection level)
else:
dirmcount = dirmcount * (requested_protection + 1)
mbig_files=mbig_files * requested_protection # if data is mirrored we simply mirror the metadata
msmall_files=msmall_files * requested_protection # if data is mirrored we simply mirror the metadata
metadata_size=(dirmcount * 8192)+(msmall_files * 512)+(mbig_files * 8192)+(meta_stuffer*(msmall_files+mbig_files+dirmcount))
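# Worked example (illustrative): with the default -mss of 3584 bytes and N+2:1
# protection (requested_protection=2, stripe_requested=True), one directory and
# one small file give, after the mirroring adjustments above:
#   dirmcount     = 1 * (2 + 2) = 4
#   msmall_files  = 1 * (2 + 1) = 3
#   metadata_size = 4*8192 + 3*512 + 0*8192 + 3584*(3 + 0 + 4) = 59,392 bytes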
#print "got msize:",metadata_size
#exit()
#metadata_size=(filemcount + dirmcount) * ibs_size
total_size=total_size + metadata_size # tally up metadata size
if odata_units=="H":
output=human_size(metadata_size)
metadata_size=output[0]
data_units=output[1]
else:
metadata_size=metadata_size/data_divider
metadata_size=round(metadata_size,4) # (rounded to 3 decimal places for ease of reading)
print("Read metadata for ",dirs_to_process," DIRs and ",files_to_process," files in (H:M:S:ms):",datetime.now() - startTime) # show how long this took and how many files we have (really just for reference)
output=human_size(meta_stuffer)
mss_h_size=output[0]
mss_h_units=output[1]
i=0 #for progress bar
print("")
if csv==False:
print("Calculating filesizes...")
if verbose==True:
if csv==False:
print("")
print("Filename | Original size (KB) | Isilon size (KB)")
else:
print("")
print("")
print("Isilon space calculator report for ",dirname,"with ", node_pool_size ," nodes using ",protection_string," protection")
print("")
print("")
print("Filename,Original size (KB),Isilon size(KB)")
calcTime = datetime.now() # for timing how long the processing takes
# go through each file in the list and we'll work out how much protection detail Isilon would add (for given cluster size and protection setting used)
for file_size in filesizes:
file_type=""
i=i+1
if verbose==False:
progress(files_to_process,40,i)# show progress bar
#file_size=file_size/1024 # convert KB first
# round up to ceiling 8kb (Isilon uses an 8KB filesystem block size, so we need to round up)
if (block_size==0):
testfs=file_size
try:
block_size=os.statvfs(dirname).f_frsize #try to find the native FS block size using Unix stats command (will fail in Windows based OS)
except AttributeError: # if above command fails, let's try finding the native FS block size using Windows native DLL instead
import ctypes
sectorsPerCluster = ctypes.c_ulonglong(0)
bytesPerSector = ctypes.c_ulonglong(0)
rootPathName = ctypes.c_wchar_p(dirname)
ctypes.windll.kernel32.GetDiskFreeSpaceW(rootPathName,
ctypes.pointer(sectorsPerCluster),
ctypes.pointer(bytesPerSector),
None,
None,
)
spc=sectorsPerCluster.value
bps=bytesPerSector.value
block_size = spc * bps
#Round all the filesize calculations (for the original data size) to the blocksize of the native filesystem (of the system this script is running on)
#block_size=8192 # just for testing (to match oneFS block size)
testfs=file_size
if (file_size>0):
file_size=int(block_size * round(float(testfs)/block_size))
if(file_size<testfs):
            file_size=file_size + block_size # round() may have rounded down, so bump up to the next whole block (stay block-aligned)
#end of pre-rounding test
total_original_size=file_size+total_original_size # totting up the total size of the original files
osize=file_size # for verbose output
if file_size==0:
total_empty_files+=1 # increment the number of empty files
else :
remainder=0
# round up to ceiling 8kb (Isilon uses an 8KB filesystem block size, so we need to round up)
rounded_file_size=int(8192 * round(float(file_size)/8192))
if(rounded_file_size<file_size):
rounded_file_size=rounded_file_size + 8192
        # if mirroring protection was requested we simply need to multiply the rounded size (no need for a complex stripe calc)
if stripe_requested==False:
file_size=rounded_file_size * requested_protection
remainder_size=0
# if striping was requested we have to do a more complex calc
else:
#check if the file is 'small' (i.e. less than, or equal to 128KB), if it is small it will be mirrored
if rounded_file_size<=131072:
total_small_files += 1 #increment the counter for small files
T_requested_protection = requested_protection + 1
file_size=rounded_file_size * T_requested_protection
remainder_size=0
file_type="small"
else:
# as file is larger than 128KB (and we've already checked for a mirroring request), we'll have to stripe the data
DU_count=float(rounded_file_size)/131072 # work out how many DUs (Data Units) will be needed
#check if DU_count is integer (if not we have a partial DU)
if (float(DU_count)).is_integer():
overspill=0 # overspill is how much we need to remove from the end of the LAST DU, if it divides perfectly there will be no overspill to remove
else:
#we have a partial DU
DU_count=int(DU_count)
overspill=131072-(rounded_file_size - (int(DU_count)*131072)) # our last DU will not really be complete, so how much do we remove? (the overspill value)
actual_stripe_size=node_pool_size - requested_protection # get the stripe size (for DUs) available
no_stripes=float(0)
no_stripes=DU_count/float(actual_stripe_size)# how many stripes do we need (not necessarily an integer result)
rounded_stripes=int(no_stripes)
remainder_size=rounded_file_size - ((actual_stripe_size * rounded_stripes) * 131072)# data left over (from partial)
#if (no_stripes<=1) and (no_stripes>0): #we don't have a full stripe here, so no need to calculate the full stripes size.
if (no_stripes==1) and (remainder_size>0): # we have just over 1 stripe (one a bit at least!)
total_large_files+= 1 # increment the counter for large files
file_type="large"
                    rounded_stripes=int(no_stripes) # round up the number of stripes by converting to an integer (we will handle the 'overspill' of writing a full stripe later)
                    rounded=False
                    full_stripes_size=((actual_stripe_size * rounded_stripes) + (requested_protection * rounded_stripes)) * 131072 # how would the stripes be written (taking into account the node pool size and protection)
elif (no_stripes<1) and (no_stripes>0): # we have less than 1 complete stripe
total_partial_files+=1 # increment the number of partial files
file_type="partial"
no_stripes=1
full_stripes_size=0
rounded=True
elif (no_stripes==1) and (overspill==0) and (remainder_size==0): # we have a perfect stripe!
total_perfect_files+=1 # increment the number of perfect stripe files
file_type="perfect"
rounded=False
else: # we have more than 1 stripe
total_large_files+= 1 #increment the counter for large files
file_type="large"
rounded_stripes=int(no_stripes) # round up the number of stripes by converting to an integer (we will handle the 'overspill' of writing a full stripe later)
rounded=False
full_stripes_size=((actual_stripe_size * rounded_stripes) + (requested_protection * rounded_stripes)) * 131072 # how would the stripes be written (taking into account the node pool size and protection)
# check for overspill
if(overspill>0):
#remainder_size=0
if rounded==True:
remainder_size=rounded_file_size
else:
remainder_size=rounded_file_size - ((actual_stripe_size * rounded_stripes) * 131072)# data left over (from partial)
#calculate the 'remainder' stripe that needs to be written
#do we need to mirror the remainder?
if (remainder_size<=131072) and (remainder_size>0):
T_requested_protection = requested_protection + 1
remainder_size=(remainder_size * T_requested_protection)
file_size=remainder_size + full_stripes_size
elif (remainder_size>131072) and (remainder_size>0):
#remainder is big enough to form final stripe
remainder_size=((remainder_size + (requested_protection * 131072)))
file_size=remainder_size + full_stripes_size
else :
#we have a perfect stripe
file_size=(actual_stripe_size + requested_protection) * 131072
if verbose==True:
filename=filenames[(i-1)]
osize_s=str(((osize/1024)))
file_size_s=str((file_size/1024))
if csv==False:
osize_s=osize_s.rjust(15)
filename=filename.ljust(50)
file_size_s=file_size_s.ljust(15)
print(filename,":",osize_s," - ",file_size_s)
else:
print(filename,",",osize_s,",",file_size_s)
t_total=total_size
total_size=(t_total+file_size)
t_total=total_size
if file_type=="small" :
total_filesize_small_orig=total_filesize_small_orig + osize
total_filesize_small_isilon=total_filesize_small_isilon + file_size
elif file_type=="partial" :
total_filesize_partial_orig=total_filesize_partial_orig + osize
total_filesize_partial_isilon=total_filesize_partial_isilon + file_size
elif file_type=="perfect" :
total_filesize_perfect_orig=total_filesize_perfect_orig + osize
total_filesize_perfect_isilon=total_filesize_perfect_isilon + file_size
elif file_type=="large" :
total_filesize_large_orig=total_filesize_large_orig + osize
total_filesize_large_isilon=total_filesize_large_isilon + file_size
if i<=0:
print("Error! Directory is empty, nothing to show!")
exit()
# calc percentage difference
diff=((total_size / float(total_original_size))*100)-100
diff=round(diff,2) # (rounded to 2 decimal places for ease of reading)
if odata_units=="H":
output=human_size(total_original_size)
totemp=output[0]
data_units=output[1]
else:
totemp=total_original_size/data_divider
totemp=round(totemp,2)
#show the results of all this (timings are more for reference as this could take hours/days!)
print("")
print("")
output=human_size(block_size)
block_size=output[0]
mdata_units=output[1]
print("Original data size is: ",totemp,data_units," (given native block size of",block_size,mdata_units,").")
if odata_units=="H":
output=float()
total_size=float(total_size)
output=human_size(total_size)
total_size=output[0]
data_units=output[1]
else:
#total_size=float(total_size)
total_size=total_size/data_divider
#total_size=total_size+metadata_size
total_size=round(total_size,2)
print("Isilon size is : ", total_size,data_units)
print("A protection overhead of ",diff,"% - percentage of additional protection data")
print("")
print("Calculation time (H:M:S:ms): ",datetime.now() - calcTime)
print("")
print("Data breakdown:")
print("Metdata size for Isilon will be:",metadata_size,data_units,"(with a metadata stuffer size of ",mss_h_size,mss_h_units,"per file)")
print("Empty files (0 bytes):",total_empty_files)
output=human_size(total_filesize_small_isilon)
total_size_isilon=round(float(output[0]),2)
data_units_isilon=output[1]
output=human_size(total_filesize_small_orig)
total_size_orig=round(float(output[0]),2)
data_units_orig=output[1]
print("Small files (128KB or less): ",total_small_files,"size orig:",total_size_orig,data_units_orig,"Isilon size:",total_size_isilon,data_units_isilon)
if total_perfect_files>0:
output=human_size(total_filesize_perfect_isilon)
total_size_isilon=round(float(output[0]),2)
data_units_isilon=output[1]
output=human_size(total_filesize_perfect_orig)
total_size_orig=round(float(output[0]),2)
data_units_orig=output[1]
print("Perfect stripe (exactly 1 stripe) files:",total_perfect_files,"size orig:",total_size_orig,data_units_orig,"Isilon size:",total_size_isilon,data_units_isilon)
output=human_size(total_filesize_partial_isilon)
total_size_isilon=round(float(output[0]),2)
data_units_isilon=output[1]
output=human_size(total_filesize_partial_orig)
total_size_orig=round(float(output[0]),2)
data_units_orig=output[1]
print("Partial files (less than one complete stripe): ",total_partial_files,"size orig:",total_size_orig,data_units_orig,"Isilon size:",total_size_isilon,data_units_isilon)
output=human_size(total_filesize_large_isilon)
total_size_isilon=round(float(output[0]),2)
data_units_isilon=output[1]
output=human_size(total_filesize_large_orig)
total_size_orig=round(float(output[0]),2)
data_units_orig=output[1]
print("Large files (more than 1 full stripe): ",total_large_files,"size orig:",total_size_orig,data_units_orig,"Isilon size:",total_size_isilon,data_units_isilon)
print("Total :",files_to_process," files")
print("Total running time (H:M:S:ms):",datetime.now() - startTime)
| 38.264706 | 217 | 0.747246 |
23f6b00cb880239c782e34076625c18593e18c16 | 747 | py | Python | tbx/core/migrations/0115_delete_tshirt_page.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | ["MIT"] | 103 | 2015-02-24T17:58:21.000Z | 2022-03-23T08:08:58.000Z | tbx/core/migrations/0115_delete_tshirt_page.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | ["MIT"] | 145 | 2015-01-13T17:13:43.000Z | 2022-03-29T12:56:20.000Z | tbx/core/migrations/0115_delete_tshirt_page.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | ["MIT"] | 57 | 2015-01-03T12:00:37.000Z | 2022-02-09T13:11:30.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-21 13:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("wagtailredirects", "0006_redirect_increase_max_length"),
("wagtailforms", "0003_capitalizeverbose"),
("wagtailsearchpromotions", "0002_capitalizeverbose"),
("wagtailcore", "0040_page_draft_title"),
("torchbox", "0114_remove_standardpage_show_in_play_menu"),
]
operations = [
migrations.RemoveField(model_name="tshirtpage", name="main_image",),
migrations.RemoveField(model_name="tshirtpage", name="page_ptr",),
migrations.DeleteModel(name="TshirtPage",),
]
| 32.478261 | 76 | 0.69344 |
bd36e2b5687d8d39b195e56b44653f4ec0abdb74 | 16,651 | py | Python | ResearchPlanner/GUI.py | anderskm/Robotti-Research-Planner | 7008f6e4b55bc065d80812d076084baa8a65cd82 | ["MIT"] | 1 | 2021-02-09T20:53:21.000Z | 2021-02-09T20:53:21.000Z | ResearchPlanner/GUI.py | anderskm/Robotti-Research-Planner | 7008f6e4b55bc065d80812d076084baa8a65cd82 | ["MIT"] | null | null | null | ResearchPlanner/GUI.py | anderskm/Robotti-Research-Planner | 7008f6e4b55bc065d80812d076084baa8a65cd82 | ["MIT"] | null | null | null |
#!/usr/bin/env python
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QDialogButtonBox, QVBoxLayout, QFormLayout, QCheckBox, QSpinBox, QDoubleSpinBox, QFileDialog, QLabel, QAction, qApp, QWidget, QMenu
import sys
import Plan as ResearchPlan
class GUI():
def __init__(self):
self.app = QApplication([])
self.app.setApplicationName("Robotti Research Planner")
self.app.setOrganizationName("Aarhus University")
self.app.setOrganizationDomain("agro.au.dk")
class ImportFileDialog(QFileDialog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setFileMode(QFileDialog.ExistingFile)
def get_file(self, folder=None, caption='Select file', filter='All files (*.*)'):
file_tuple = self.getOpenFileName(caption=caption, filter=filter)
if not file_tuple[0]:
output=None
raise UserWarning('"' + caption + '" cancelled by user.')
else:
output = file_tuple[0]
return output
class ExportFileDialog(QFileDialog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setFileMode(QFileDialog.AnyFile)
def get_file(self, folder=None, caption='Save file', filter='JSON (*.json);;All files (*.*)'):
file_tuple = self.getSaveFileName(caption=caption, filter=filter)
if not file_tuple[0]:
output=None
raise UserWarning('"' + caption + '" cancelled by user.')
else:
output = file_tuple[0]
return output
class PlotSettingsDialog(QDialog):
def __init__(self, ID=None, ignore=False, force_direction=False, work=True, working_speed=1.0, hitch_height=0.6, pto_rpm=0.0, working_speed_min=1.0, working_speed_max=6.0, hitch_height_min=0.16, hitch_height_max=0.6, pto_rpm_min=0, pto_rpm_max=4000, *args, **kwargs):
# super(PlotSettingsDialog, self).__init__(*args,**kwargs)
super().__init__(*args,**kwargs)
self.setWindowTitle('Plot settings')
# Setup buttons
self.buttonBox = QDialogButtonBox()
self.buttonBox.addButton("Ok", QDialogButtonBox.AcceptRole)
self.buttonBox.addButton("Cancel", QDialogButtonBox.RejectRole)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.ID_widget = QLabel()
self.ID_widget.setText(str(ID))
self.ignore_widget = QCheckBox()
if (ignore):
self.ignore_widget.setCheckState(Qt.Checked)
else:
self.ignore_widget.setCheckState(Qt.Unchecked)
ignore_label = QLabel()
ignore_label.setText('Ignore:')
ignore_label.setWhatsThis('Ignored plots will never be entered during route planning.')
self.force_direction_widget = QCheckBox()
if (force_direction):
self.force_direction_widget.setCheckState(Qt.Checked)
else:
self.force_direction_widget.setCheckState(Qt.Unchecked)
self.force_direction_widget.setEnabled(False) #NOTE: Disable since the feature in the Robotti planner is currently disabled/not implemented
force_direction_label = QLabel()
force_direction_label.setText('Force A->B:')
force_direction_label.setWhatsThis('Force the route planner to always drive in the A->B direction through plots. NOTE: This is currently disabled in the route planner.')
self.work_widget = QCheckBox()
if (work):
self.work_widget.setCheckState(Qt.Checked)
else:
self.work_widget.setCheckState(Qt.Unchecked)
work_label = QLabel()
work_label.setText('Work:')
work_label.setWhatsThis('Non-work plots may be used for traversing by the route planner. To completely avoid entering a plot use the Ignore field.')
self.working_speed_widget = QDoubleSpinBox()
self.working_speed_widget.setMinimum(working_speed_min)
self.working_speed_widget.setMaximum(working_speed_max)
self.working_speed_widget.setValue(working_speed)
self.working_speed_widget.setSingleStep(0.5)
self.working_speed_widget.setSuffix(' km/h')
self.working_speed_widget.setAlignment(Qt.AlignRight)
self.hitch_height_widget = QDoubleSpinBox()
self.hitch_height_widget.setMinimum(hitch_height_min)
self.hitch_height_widget.setMaximum(hitch_height_max)
self.hitch_height_widget.setValue(hitch_height)
self.hitch_height_widget.setSingleStep(0.05)
self.hitch_height_widget.setSuffix(' m')
self.hitch_height_widget.setAlignment(Qt.AlignRight)
self.pto_rpm_widget = QSpinBox()
self.pto_rpm_widget.setMinimum(pto_rpm_min)
self.pto_rpm_widget.setMaximum(pto_rpm_max)
self.pto_rpm_widget.setValue(pto_rpm)
self.pto_rpm_widget.setSingleStep(100)
self.pto_rpm_widget.setSuffix(' rpm')
self.pto_rpm_widget.setAlignment(Qt.AlignRight)
layout = QFormLayout()
layout.addRow('Plot ID:', self.ID_widget)
layout.addRow(ignore_label, self.ignore_widget)
layout.addRow(force_direction_label, self.force_direction_widget)
layout.addRow(work_label, self.work_widget)
layout.addRow('Working speed:', self.working_speed_widget)
layout.addRow('Hitch height:', self.hitch_height_widget)
layout.addRow('PTO:', self.pto_rpm_widget)
# Add buttons
layout.addWidget(self.buttonBox)
self.setLayout(layout)
def get_settings(self):
settings_dict = {'ID': self.ID_widget.text(),
'ignore': bool(self.ignore_widget.checkState()),
'force_direction': bool(self.force_direction_widget.checkState()),
'work': bool(self.work_widget.checkState()),
'working_speed': self.working_speed_widget.value(),
'hitch_height': self.hitch_height_widget.value(),
'pto_rpm': self.pto_rpm_widget.value()}
return settings_dict
class ResearchPlannerGUI(QMainWindow):
plan = None
def __init__(self, app=None, plan=None, *args, **kwargs):
super().__init__(*args,**kwargs)
self.plan = ResearchPlan.Plan()
self._init_gui()
def _init_gui(self):
self.statusBar().showMessage('Loading GUI')
self.setGeometry(300, 300, 800, 600)
self.setWindowTitle('Research Planner')
### Setup menus
menubar = self.menuBar()
## "File" menu
file_menu = menubar.addMenu('&File')
exit_action = QAction('&Quit', self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Quit Research Planner')
exit_action.triggered.connect(qApp.quit)
import_plots_action = QAction('Import &Plots', self)
import_plots_action.setShortcut('Ctrl+P')
import_plots_action.setStatusTip('Import plots')
import_plots_action.triggered.connect(self.import_plots)
import_field_action = QAction('Import Field', self)
import_field_action.setShortcut('Ctrl+F')
import_field_action.setStatusTip('Import field')
import_field_action.triggered.connect(self.import_field)
export_plots_action = QAction('Export Plots', self)
export_plots_action.setStatusTip('Export plots to Robotti compatible json-format')
export_plots_action.triggered.connect(self.export_plots)
export_field_action = QAction('Export Field', self)
export_field_action.setStatusTip('Export field to Robotti compatible json-format')
export_field_action.triggered.connect(self.export_field)
file_menu.addAction(import_plots_action)
file_menu.addAction(import_field_action)
file_menu.addSeparator()
file_menu.addAction(export_plots_action)
file_menu.addAction(export_field_action)
file_menu.addSeparator()
file_menu.addAction(exit_action)
## "Edit" menu
edit_menu = menubar.addMenu('&Edit')
settings_all_plots_action = QAction('All Plots', self)
settings_all_plots_action.setStatusTip('Edit the settings of all plots at once')
settings_all_plots_action.triggered.connect(self.settings_all_plots)
edit_menu.addAction(settings_all_plots_action)
## "View" menu
view_menu = menubar.addMenu('&View')
view_plot_action = QAction('Show plots', self, checkable=True)
view_plot_action.setStatusTip('Toggle displaying plot outlines')
view_plot_action.setChecked(True)
view_plot_action.triggered.connect(self.toggle_view_plot)
view_ab_line_action = QAction('Show AB-lines', self, checkable=True)
view_ab_line_action.setStatusTip('Toggle displaying the AB-lines of plots')
view_ab_line_action.setChecked(False)
view_ab_line_action.triggered.connect(self.toggle_view_ab_line)
view_end_points_action = QAction('Show end points', self, checkable=True)
view_end_points_action.setStatusTip('Toggle displaying the end points of plots on the AB-line')
view_end_points_action.setChecked(True)
view_end_points_action.triggered.connect(self.toggle_view_end_points)
view_field_action = QAction('Show field', self, checkable=True)
view_field_action.setStatusTip('Toggle displaying field outlines')
view_field_action.setChecked(True)
view_field_action.triggered.connect(self.toggle_view_field)
view_menu.addAction(view_plot_action)
view_menu.addAction(view_ab_line_action)
view_menu.addAction(view_end_points_action)
view_menu.addSeparator()
view_menu.addAction(view_field_action)
self._reset_view()
### Setup main area
window_widget = QWidget()
## Setup figure canvas
self.figure = Figure()
self.canvas = FigureCanvas(self.figure)
self.ax = self.figure.add_subplot(111)
self.canvas.mpl_connect('pick_event', self.on_pick_event)
## Setup layout of window widget
layout = QVBoxLayout()
layout.addWidget(self.canvas)
window_widget.setLayout(layout)
# Set window widget as central widget of main window
self.setCentralWidget(window_widget)
self.statusBar().showMessage('Ready')
self.show()
def on_pick_event(self, event):
# print(event.artist)
plot_id = event.artist.get_text()
plot = None
for idx, _plot in enumerate(self.plan.plots):
if _plot.ID == plot_id:
plot = _plot
plot_idx = idx
break
if plot is not None:
plotDlg = GUI.PlotSettingsDialog(ID=plot_id, ignore=plot.ignored, force_direction=plot.force_direction, work=plot.work, working_speed=plot.working_speed, hitch_height=plot.hitch_height, pto_rpm=plot.pto_rpm)
if (plotDlg.exec_()):
settings = plotDlg.get_settings()
print(settings)
plot.ignored = settings['ignore']
plot.force_direction = settings['force_direction']
plot.work = settings['work']
plot.working_speed = settings['working_speed']
plot.hitch_height = settings['hitch_height']
plot.pto_rpm = settings['pto_rpm']
self._update_canvas()
def _reset_view(self):
self._show_plots = True
self._show_ab_lines = False
self._show_end_points = True
self._show_field = True
def import_plots(self):
self.statusBar().showMessage('Importing plots...')
try:
import_dlg = GUI.ImportFileDialog()
filename_plots = import_dlg.get_file(caption='Import plots', filter='CSV (*.csv);;All files (*.*)')
self.plan.read_plot_csv(filename_plots, is_utm=True)
self._update_canvas()
self.statusBar().showMessage('Plots imported: ' + filename_plots)
except UserWarning as e:
self.statusBar().showMessage(str(e))
def import_field(self):
self.statusBar().showMessage('Importing field...')
try:
import_dlg = GUI.ImportFileDialog()
filename_field = import_dlg.get_file(caption='Import field', filter='CSV (*.csv);;All files (*.*)')
self.plan.read_field_csv(filename_field, is_utm=True)
self._update_canvas()
self.statusBar().showMessage('Field imported: ' + filename_field)
except UserWarning as e:
self.statusBar().showMessage(str(e))
def export_plots(self):
self.statusBar().showMessage('Exporting plots...')
try:
export_dlg = GUI.ExportFileDialog()
filename_out_plots = export_dlg.get_file(caption='Export plots')
self.plan.export_plots(filename_out_plots)
self.statusBar().showMessage('Plots exported: ' + filename_out_plots)
except UserWarning as e:
self.statusBar().showMessage(str(e))
def export_field(self):
self.statusBar().showMessage('Exporting field...')
try:
export_dlg = GUI.ExportFileDialog()
filename_out_field = export_dlg.get_file(caption='Export field')
self.plan.export_field(filename_out_field)
self.statusBar().showMessage('Field exported: ' + filename_out_field)
except UserWarning as e:
self.statusBar().showMessage(str(e))
def settings_all_plots(self):
if (self.plan.plots is not None):
plot = self.plan.plots[0]
plotDlg = GUI.PlotSettingsDialog(ignore=plot.ignored, force_direction=plot.force_direction, work=plot.work, working_speed=plot.working_speed, hitch_height=plot.hitch_height, pto_rpm=plot.pto_rpm)
if (plotDlg.exec_()):
settings = plotDlg.get_settings()
print('Settings:')
print(settings)
# Update all plots with settings
for plot in self.plan.plots:
plot.ignored = settings['ignore']
plot.force_direction = settings['force_direction']
plot.work = settings['work']
plot.working_speed = settings['working_speed']
plot.hitch_height = settings['hitch_height']
plot.pto_rpm = settings['pto_rpm']
self._update_canvas()
def toggle_view_plot(self, state):
if state:
self._show_plots = True
else:
self._show_plots = False
self._update_canvas()
def toggle_view_field(self, state):
if state:
self._show_field = True
else:
self._show_field = False
self._update_canvas()
def toggle_view_ab_line(self, state):
if state:
self._show_ab_lines = True
else:
self._show_ab_lines = False
self._update_canvas()
def toggle_view_end_points(self, state):
if state:
self._show_end_points = True
else:
self._show_end_points = False
self._update_canvas()
def _update_canvas(self):
self.statusBar().showMessage('Updating canvas...')
self.ax.clear()
self.plan.draw(ax=self.ax, show_field=self._show_field, show_plot=self._show_plots, show_AB_line=self._show_ab_lines, show_AB=self._show_ab_lines, show_end_points=self._show_end_points)
ResearchPlan.plt.show()
self.canvas.draw()
self.statusBar().showMessage('Canvas updated')
if __name__ == '__main__':
app = QApplication(sys.argv)
    main = GUI.ResearchPlannerGUI()
main.show()
sys.exit(app.exec_())
| 40.711491 | 275 | 0.639902 |
29ade526c99b280f6298295415b97875ddfd14e0 | 5,838 | py | Python | round1a/python/scoring_harness/challenge_config.py | allaway/IDG-DREAM-Drug-Kinase-Challenge | 9f75cb27fa927880f61fccc46860e7e12f8da29e | ["Apache-2.0"] | null | null | null | round1a/python/scoring_harness/challenge_config.py | allaway/IDG-DREAM-Drug-Kinase-Challenge | 9f75cb27fa927880f61fccc46860e7e12f8da29e | ["Apache-2.0"] | null | null | null | round1a/python/scoring_harness/challenge_config.py | allaway/IDG-DREAM-Drug-Kinase-Challenge | 9f75cb27fa927880f61fccc46860e7e12f8da29e | ["Apache-2.0"] | null | null | null |
"""
Scoring-harness configuration for the IDG-DREAM drug-kinase binding prediction challenge.
"""
import pandas as pd
import os
import sys
import evaluation_metrics_python2 as eval
CHALLENGE_SYN_ID = "syn15667962"
CHALLENGE_NAME = "IDG-DREAM Drug-Kinase Binding Prediction Challenge"
ADMIN_USER_IDS = [3360851]
REQUIRED_COLUMNS = [
"Compound_SMILES", "Compound_InchiKeys", "Compound_Name", "UniProt_Id",
"Entrez_Gene_Symbol", "DiscoveRx_Gene_Symbol", "pKd_[M]_pred"]
CONFIG_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
def validate_func(submission, goldstandard_path):
try:
sub_df = pd.read_csv(submission.filePath)
except Exception as e:
error_string = "Error reading in submission file: " + str(e)
raise AssertionError(error_string)
gs_df = pd.read_csv(goldstandard_path)
for col in REQUIRED_COLUMNS:
assert col in sub_df.columns, (
"Submission file is missing column: " + col)
    assert sub_df.shape[0] == sub_df.dropna().shape[0], (
        "Submission file has missing values.")
sub_df["pKd_[M]_pred"] = pd.to_numeric(sub_df["pKd_[M]_pred"],
errors='coerce')
assert sub_df.shape[0] == sub_df.dropna().shape[0], (
"Submission file has missing values, after converting prediction " +
"column to float values.")
assert sub_df.shape[0] == gs_df.shape[0], (
"Submission file should have " +
str(gs_df.shape[0]) +
" predictions")
try:
combined_df = pd.merge(sub_df, gs_df, how='inner')
except Exception as e:
error_string = "Error combing submission and gold standard file"
raise AssertionError(error_string)
left_join_df = pd.merge(sub_df, gs_df, how='left')
    left_join_na_df = left_join_df[left_join_df.isnull().any(axis=1)]
    na_row_indices = left_join_na_df.index.tolist()
    na_row_numbers = ", ".join([str(i + 2) for i in na_row_indices])
assert combined_df.shape[0] == gs_df.shape[0], (
"Merge failed due to inconsistent values between submission file and" +
" validation file in one or more label columns at rows: " +
na_row_numbers)
assert combined_df["pKd_[M]_pred"].var() != 0, (
"After merging submission file and gold standard, prediction column" +
" has variance of 0.")
return(True, "Passed Validation")
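# Added illustration (not part of the scoring harness): the left-merge check in
# validate_func() reports which submission rows fail to line up with the gold
# standard by locating rows that pick up NaNs after a left join. The helper
# below reproduces that trick on tiny in-memory frames; all values are made up.
def _demo_left_merge_row_report():
    import pandas as pd
    sub = pd.DataFrame({"UniProt_Id": ["P1", "P2", "P3"],
                        "pKd_[M]_pred": [7.1, 6.4, 5.9]})
    gold = pd.DataFrame({"UniProt_Id": ["P1", "P3"],
                         "pKd_[M]": [7.0, 6.0]})
    left = pd.merge(sub, gold, how="left")          # unmatched rows get NaN
    bad = left[left.isnull().any(axis=1)]
    # +2 turns a 0-based frame index into a 1-based CSV row number plus header
    return ", ".join(str(i + 2) for i in bad.index.tolist())
# _demo_left_merge_row_report() returns "3": the P2 row has no gold-standard match.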
def score1(submission, goldstandard_path):
sub_df = pd.read_csv(submission.filePath)
gs_df = pd.read_csv(goldstandard_path)
combined_df = pd.merge(sub_df, gs_df, how='inner')
actual = combined_df["pKd_[M]"]
predicted = combined_df["pKd_[M]_pred"]
rmse = round(eval.rmse(actual, predicted), 4)
pearson = round(eval.pearson(actual, predicted), 4)
spearman = round(eval.spearman(actual, predicted), 4)
ci = round(eval.ci(actual, predicted), 4)
f1 = round(eval.f1(actual, predicted), 4)
average_AUC = round(eval.average_AUC(actual, predicted), 4)
return(rmse, pearson, spearman, ci, f1, average_AUC)
evaluation_queues = [
{
'id': 9614078,
'scoring_func': score1,
'validation_func': validate_func,
'goldstandard_path': CONFIG_DIR + "/round_1_test_data.csv"
},
{
'id': 9614079,
'scoring_func': score1,
'validation_func': validate_func,
'goldstandard_path': CONFIG_DIR + "/round_2_test_data.csv"
}
]
evaluation_queue_by_id = {q['id']:q for q in evaluation_queues}
## define the default set of columns that will make up the leaderboard
LEADERBOARD_COLUMNS = [
dict(name='objectId', display_name='ID', columnType='STRING', maximumSize=20),
dict(name='userId', display_name='User', columnType='STRING', maximumSize=20, renderer='userid'),
dict(name='entityId', display_name='Entity', columnType='STRING', maximumSize=20, renderer='synapseid'),
dict(name='versionNumber', display_name='Version', columnType='INTEGER'),
dict(name='name', display_name='Name', columnType='STRING', maximumSize=240),
dict(name='team', display_name='Team', columnType='STRING', maximumSize=240)]
## Here we're adding columns for the output of our scoring functions, score,
## rmse and auc to the basic leaderboard information. In general, different
## questions would typically have different scoring metrics.
leaderboard_columns = {}
for q in evaluation_queues:
leaderboard_columns[q['id']] = LEADERBOARD_COLUMNS + [
dict(name='score', display_name='Score', columnType='DOUBLE'),
dict(name='rmse', display_name='RMSE', columnType='DOUBLE'),
dict(name='auc', display_name='AUC', columnType='DOUBLE')]
## map each evaluation queues to the synapse ID of a table object
## where the table holds a leaderboard for that question
leaderboard_tables = {}
def validate_submission(evaluation, submission):
"""
Find the right validation function and validate the submission.
:returns: (True, message) if validated, (False, message) if
validation fails or throws exception
"""
config = evaluation_queue_by_id[int(evaluation.id)]
validated, validation_message = config['validation_func'](submission, config['goldstandard_path'])
    return validated, validation_message
def score_submission(evaluation, submission):
"""
Find the right scoring function and score the submission
:returns: (score, message) where score is a dict of stats and message
is text for display to user
"""
config = evaluation_queue_by_id[int(evaluation.id)]
score = config['scoring_func'](submission, config['goldstandard_path'])
return (dict(
rmse=score[0],
pearson=score[1],
spearman=score[2],
ci=score[3],
f1=score[4],
average_AUC=score[5]),
"You did fine!")
| 36.949367 | 114 | 0.667866 |
24227590c10899a3b989346bf539625f5f5fdd89 | 840 | py | Python | gitlist.py | cloudmesh/cloudmesh-notebook | 106c489f8e0ec82b97dec935d2684d40e2e79452 | ["Apache-2.0"] | null | null | null | gitlist.py | cloudmesh/cloudmesh-notebook | 106c489f8e0ec82b97dec935d2684d40e2e79452 | ["Apache-2.0"] | null | null | null | gitlist.py | cloudmesh/cloudmesh-notebook | 106c489f8e0ec82b97dec935d2684d40e2e79452 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import requests
from pprint import pprint
from cloudmesh.common.Printer import Printer
order=["full_name", "description", "open_issues", "created_at", "updated_at"]
def repos(organization):
entries = []
    page = 1  # GitHub's REST API pages start at 1 (page=0 repeats the first page)
    while True:
        path = f"https://api.github.com/orgs/{organization}/repos?page={page}&per_page=100"  # per_page is capped at 100
r = requests.get(path)
d = r.json()
if len(d) == 0:
break
else:
entries.extend(d)
page = page + 1
print(Printer.flatwrite(entries, order=order, output='table'))
print()
print ("Number of entries:", len(entries))
repos("cloudmesh-community")
repos("cloudmesh")
repos("bigdata-i523")
| 21 | 91 | 0.634524 |
0925068f4eb6201f3863521c17c647cfdac1d45d | 1,537 | py | Python | tests/model/generated/endpoint/test_draft_share_invite_bank.py | luisriverag/sdk_python | 8cb9f77da5e1306110706518417e4ddc65c6207f | ["MIT"] | 88 | 2017-08-01T18:39:46.000Z | 2022-02-21T12:34:16.000Z | tests/model/generated/endpoint/test_draft_share_invite_bank.py | luisriverag/sdk_python | 8cb9f77da5e1306110706518417e4ddc65c6207f | ["MIT"] | 136 | 2017-08-02T13:54:41.000Z | 2021-04-25T20:31:08.000Z | tests/model/generated/endpoint/test_draft_share_invite_bank.py | luisriverag/sdk_python | 8cb9f77da5e1306110706518417e4ddc65c6207f | ["MIT"] | 30 | 2017-08-15T09:35:42.000Z | 2021-05-06T12:42:06.000Z |
from datetime import datetime
from datetime import timedelta
from bunq.sdk.model.generated.endpoint import DraftShareInviteBank
from bunq.sdk.model.generated.endpoint import DraftShareInviteBankQrCodeContent
from bunq.sdk.model.generated.object_ import DraftShareInviteEntry
from bunq.sdk.model.generated.object_ import ShareDetail
from bunq.sdk.model.generated.object_ import ShareDetailReadOnly
from tests.bunq_test import BunqSdkTestCase
class TestDraftShareInvite(BunqSdkTestCase):
"""
Tests:
DraftShareInviteBankEntry
DraftShareInviteBankQrCodeContent
"""
_OUT_PUT_FILE_PATH = 'connectQr.png'
_WRITE_BYTES = 'wb'
_EXPIRATION_ADDED_TIME = 1
def test_draft_share_invite_bank(self):
"""
Tests the creation of a connect and retrieves the QR code bound to
this connect.
        This test has no assertions; it only checks that the code runs
        without errors.
"""
share_detail = ShareDetail(read_only=ShareDetailReadOnly(True, True, True))
share_settings = DraftShareInviteEntry(share_detail)
draft_id = DraftShareInviteBank.create(self.expiration_date, share_settings).value
connect_qr = DraftShareInviteBankQrCodeContent.list(draft_id).value
with open(self._OUT_PUT_FILE_PATH, self._WRITE_BYTES) as f:
f.write(connect_qr)
@property
def expiration_date(self) -> str:
date = datetime.utcnow() + timedelta(hours=self._EXPIRATION_ADDED_TIME)
return date.isoformat()
| 34.155556 | 91 | 0.743006 |
a98c287341e106c75909ab81631bdad6fcf09ae1 | 12,063 | py | Python | tests/targets/test_android.py | fullbuffer/buildozer | 459807465ade35fca1fdd71edfe49b157b3b1f42 | ["MIT"] | null | null | null | tests/targets/test_android.py | fullbuffer/buildozer | 459807465ade35fca1fdd71edfe49b157b3b1f42 | ["MIT"] | null | null | null | tests/targets/test_android.py | fullbuffer/buildozer | 459807465ade35fca1fdd71edfe49b157b3b1f42 | ["MIT"] | null | null | null |
import os
import tempfile
from unittest import mock
import pytest
from buildozer.targets.android import TargetAndroid
from tests.targets.utils import (
init_buildozer,
patch_buildozer,
patch_buildozer_checkbin,
patch_buildozer_cmd,
patch_buildozer_file_exists,
)
def patch_buildozer_cmd_expect():
return patch_buildozer("cmd_expect")
def patch_buildozer_download():
return patch_buildozer("download")
def patch_buildozer_file_extract():
return patch_buildozer("file_extract")
def patch_os_isfile():
return mock.patch("os.path.isfile")
def patch_target_android(method):
return mock.patch(
"buildozer.targets.android.TargetAndroid.{method}".format(method=method)
)
def patch_platform(platform):
return mock.patch("buildozer.targets.android.platform", platform)
def init_target(temp_dir, options=None):
buildozer = init_buildozer(temp_dir, "android", options)
return TargetAndroid(buildozer)
def call_build_package(target_android):
"""
Call the build_package() method of the tested TargetAndroid instance,
patching the functions that would otherwise produce side-effects.
Return the mocked execute_build_package() method of the TargetAndroid
instance so that tests can easily check which command-line arguments
would be passed on to python-for-android's toolchain.
"""
buildozer = target_android.buildozer
expected_dist_dir = (
'{buildozer_dir}/android/platform/build-armeabi-v7a/dists/myapp__armeabi-v7a'.format(
buildozer_dir=buildozer.buildozer_dir)
)
with patch_target_android('_update_libraries_references') as m_update_libraries_references, \
patch_target_android('_generate_whitelist') as m_generate_whitelist, \
mock.patch('buildozer.targets.android.TargetAndroid.execute_build_package') as m_execute_build_package, \
mock.patch('buildozer.targets.android.copyfile') as m_copyfile, \
mock.patch('buildozer.targets.android.os.listdir') as m_listdir:
m_listdir.return_value = ['30.0.0-rc2']
target_android.build_package()
assert m_listdir.call_count == 1
assert m_update_libraries_references.call_args_list == [
mock.call(expected_dist_dir)
]
assert m_generate_whitelist.call_args_list == [mock.call(expected_dist_dir)]
assert m_copyfile.call_args_list == [
mock.call(
'{expected_dist_dir}/bin/MyApplication-0.1-debug.apk'.format(
expected_dist_dir=expected_dist_dir
),
'{bin_dir}/myapp-0.1-armeabi-v7a-debug.apk'.format(bin_dir=buildozer.bin_dir),
)
]
return m_execute_build_package
class TestTargetAndroid:
def setup_method(self):
"""
Create a temporary directory that will contain the spec file and will
serve as the root_dir.
"""
self.temp_dir = tempfile.TemporaryDirectory()
    def teardown_method(self):
"""
Remove the temporary directory created in self.setup_method.
"""
self.temp_dir.cleanup()
def test_init(self):
"""Tests init defaults."""
target_android = init_target(self.temp_dir)
buildozer = target_android.buildozer
assert target_android._arch == "armeabi-v7a"
assert target_android._build_dir.endswith(
".buildozer/android/platform/build-armeabi-v7a"
)
assert target_android._p4a_bootstrap == "sdl2"
assert target_android._p4a_cmd.endswith(
"python -m pythonforandroid.toolchain "
)
assert target_android.build_mode == "debug"
assert (
target_android.extra_p4a_args == (
' --color=always'
' --storage-dir="{buildozer_dir}/android/platform/build-armeabi-v7a" --ndk-api=21'.format(
buildozer_dir=buildozer.buildozer_dir)
)
)
assert target_android.p4a_apk_cmd == "apk --debug --bootstrap=sdl2"
assert target_android.platform_update is False
def test_init_positional_buildozer(self):
"""Positional `buildozer` argument is required."""
with pytest.raises(TypeError) as ex_info:
TargetAndroid()
assert ex_info.value.args == (
"__init__() missing 1 required positional argument: 'buildozer'",
)
def test_sdkmanager(self):
"""Tests the _sdkmanager() method."""
target_android = init_target(self.temp_dir)
kwargs = {}
with patch_buildozer_cmd() as m_cmd, patch_buildozer_cmd_expect() as m_cmd_expect, patch_os_isfile() as m_isfile:
m_isfile.return_value = True
assert m_cmd.return_value == target_android._sdkmanager(**kwargs)
assert m_cmd.call_count == 1
assert m_cmd_expect.call_count == 0
assert m_isfile.call_count == 1
kwargs = {"return_child": True}
with patch_buildozer_cmd() as m_cmd, patch_buildozer_cmd_expect() as m_cmd_expect, patch_os_isfile() as m_isfile:
m_isfile.return_value = True
assert m_cmd_expect.return_value == target_android._sdkmanager(
**kwargs
)
assert m_cmd.call_count == 0
assert m_cmd_expect.call_count == 1
assert m_isfile.call_count == 1
def test_check_requirements(self):
"""Basic tests for the check_requirements() method."""
target_android = init_target(self.temp_dir)
buildozer = target_android.buildozer
assert not hasattr(target_android, "adb_cmd")
assert not hasattr(target_android, "javac_cmd")
assert "PATH" not in buildozer.environ
with patch_buildozer_checkbin() as m_checkbin:
target_android.check_requirements()
assert m_checkbin.call_args_list == [
mock.call("Git (git)", "git"),
mock.call("Cython (cython)", "cython"),
mock.call("Java compiler (javac)", "javac"),
mock.call("Java keytool (keytool)", "keytool"),
]
assert target_android.adb_cmd.endswith(
".buildozer/android/platform/android-sdk/platform-tools/adb"
)
assert target_android.javac_cmd == "javac"
assert target_android.keytool_cmd == "keytool"
assert "PATH" in buildozer.environ
def test_check_configuration_tokens(self):
"""Basic tests for the check_configuration_tokens() method."""
target_android = init_target(self.temp_dir)
with mock.patch(
"buildozer.targets.android.Target.check_configuration_tokens"
) as m_check_configuration_tokens:
target_android.check_configuration_tokens()
assert m_check_configuration_tokens.call_args_list == [mock.call([])]
@pytest.mark.parametrize("platform", ["linux", "darwin"])
def test_install_android_sdk(self, platform):
"""Basic tests for the _install_android_sdk() method."""
target_android = init_target(self.temp_dir)
with patch_buildozer_file_exists() as m_file_exists, patch_buildozer_download() as m_download:
m_file_exists.return_value = True
sdk_dir = target_android._install_android_sdk()
assert m_file_exists.call_args_list == [
mock.call(target_android.android_sdk_dir)
]
assert m_download.call_args_list == []
assert sdk_dir.endswith(".buildozer/android/platform/android-sdk")
with patch_buildozer_file_exists() as m_file_exists, \
patch_buildozer_download() as m_download, \
patch_buildozer_file_extract() as m_file_extract, \
patch_platform(platform):
m_file_exists.return_value = False
sdk_dir = target_android._install_android_sdk()
assert m_file_exists.call_args_list == [
mock.call(target_android.android_sdk_dir)
]
platform_map = {"linux": "linux", "darwin": "mac"}
platform = platform_map[platform]
archive = "commandlinetools-{platform}-6514223_latest.zip".format(platform=platform)
assert m_download.call_args_list == [
mock.call(
"https://dl.google.com/android/repository/",
archive,
cwd=mock.ANY,
)
]
assert m_file_extract.call_args_list == [mock.call(archive, cwd=mock.ANY)]
assert sdk_dir.endswith(".buildozer/android/platform/android-sdk")
def test_build_package(self):
"""Basic tests for the build_package() method."""
target_android = init_target(self.temp_dir)
buildozer = target_android.buildozer
m_execute_build_package = call_build_package(target_android)
assert m_execute_build_package.call_args_list == [
mock.call(
[
("--name", "'My Application'"),
("--version", "0.1"),
("--package", "org.test.myapp"),
("--minsdk", "21"),
("--ndk-api", "21"),
("--private", "{buildozer_dir}/android/app".format(buildozer_dir=buildozer.buildozer_dir)),
("--android-entrypoint", "org.kivy.android.PythonActivity"),
("--android-apptheme", "@android:style/Theme.NoTitleBar"),
("--orientation", "portrait"),
("--window",),
("debug",),
]
)
]
def test_numeric_version(self):
"""The `android.numeric_version` config should be passed to `build_package()`."""
target_android = init_target(self.temp_dir, {
"android.numeric_version": "1234"
})
buildozer = target_android.buildozer
m_execute_build_package = call_build_package(target_android)
assert m_execute_build_package.call_args_list == [
mock.call(
[
("--name", "'My Application'"),
("--version", "0.1"),
("--package", "org.test.myapp"),
("--minsdk", "21"),
("--ndk-api", "21"),
("--private", "{buildozer_dir}/android/app".format(buildozer_dir=buildozer.buildozer_dir)),
("--android-entrypoint", "org.kivy.android.PythonActivity"),
("--android-apptheme", "@android:style/Theme.NoTitleBar"),
("--orientation", "portrait"),
("--window",),
("--numeric-version", "1234"),
("debug",),
]
)
]
def test_build_package_intent_filters(self):
"""
The build_package() method should honour the manifest.intent_filters
config option.
"""
filters_path = os.path.join(self.temp_dir.name, 'filters.xml')
with open(filters_path, 'w') as f:
f.write('<?xml version="1.0" encoding="utf-8"?>')
target_android = init_target(self.temp_dir, {
'android.manifest.intent_filters': 'filters.xml'
})
buildozer = target_android.buildozer
m_execute_build_package = call_build_package(target_android)
assert m_execute_build_package.call_args_list == [
mock.call(
[
('--name', "'My Application'"),
('--version', '0.1'),
('--package', 'org.test.myapp'),
('--minsdk', '21'),
('--ndk-api', '21'),
('--private', '{buildozer_dir}/android/app'.format(buildozer_dir=buildozer.buildozer_dir)),
('--android-entrypoint', 'org.kivy.android.PythonActivity'),
('--android-apptheme', '@android:style/Theme.NoTitleBar'),
('--orientation', 'portrait'),
('--window',),
('--intent-filters', os.path.realpath(filters_path)),
('debug',),
]
)
]
| 40.076412 | 121 | 0.615353 |
ca871205bd4316882721a6cb9e5c8296689ec886 | 6,921 | py | Python | test/test_constraints.py | pyro-ppl/pyroed | c549bd9dc9511e2199ff55fb2c86f84226b9b1c2 | ["Apache-2.0"] | 12 | 2022-03-15T16:27:06.000Z | 2022-03-25T09:14:45.000Z | test/test_constraints.py | broadinstitute/pyroed | c549bd9dc9511e2199ff55fb2c86f84226b9b1c2 | ["Apache-2.0"] | 2 | 2022-03-22T17:42:28.000Z | 2022-03-22T17:43:30.000Z | test/test_constraints.py | pyro-ppl/pyroed | c549bd9dc9511e2199ff55fb2c86f84226b9b1c2 | ["Apache-2.0"] | 1 | 2022-03-17T17:02:40.000Z | 2022-03-17T17:02:40.000Z |
from collections import OrderedDict
from typing import List, Optional
from pyroed.api import encode_design
from pyroed.constraints import (
AllDifferent,
And,
Function,
Iff,
IfThen,
Not,
Or,
TakesValue,
TakesValues,
Xor,
)
from pyroed.typing import Schema
def stringify(bools: List[bool]) -> str:
return "".join("1" if x else "0" for x in bools)
def test_immune_sequence():
SCHEMA: Schema = OrderedDict()
SCHEMA["Protein 1"] = ["Prot1", "Prot2", None]
SCHEMA["Protein 2"] = ["Prot3", "HLA1", "HLA2", "HLA3", "HLA4", None]
SCHEMA["Signalling Pep"] = ["Sig1", "Sig2", None]
SCHEMA["EP"] = [f"Ep{i}" for i in range(1, 10 + 1)]
SCHEMA["EP"].append(None)
SCHEMA["Linker"] = ["Link1", None]
SCHEMA["Internal"] = ["Int1", "Int2", "Int3", "Int3", None]
SCHEMA["2A-1"] = ["twoa1", "twoa2", None]
SCHEMA["2A-2"] = ["twoa3", "twoa4", None]
SCHEMA["2A-3"] = [f"twoa{i}" for i in range(1, 7 + 1)]
CONSTRAINTS = [
AllDifferent("2A-1", "2A-2", "2A-3"),
Iff(TakesValue("Protein 1", None), TakesValue("2A-1", None)),
Iff(TakesValue("Signalling Pep", None), TakesValue("EP", None)),
Iff(TakesValue("EP", None), TakesValue("Linker", None)),
IfThen(TakesValue("Protein 2", None), TakesValue("Internal", None)),
Iff(TakesValue("Protein 2", "Prot3"), TakesValue("2A-2", None)),
]
design: List[List[Optional[str]]] = [
["Prot1", "Prot3", "Sig1", "Ep1", "Link1", "Int1", "twoa1", None, "twoa2"],
["Prot1", "Prot3", "Sig1", "Ep1", "Link1", "Int1", "twoa1", None, "twoa1"],
[None, "Prot3", "Sig1", "Ep1", "Link1", "Int1", "twoa1", None, "twoa2"],
["Prot1", "Prot3", "Sig1", None, None, None, "twoa1", None, "twoa2"],
["Prot1", "Prot3", "Sig1", "Ep1", None, "Int1", "twoa1", None, "twoa2"],
["Prot1", None, "Sig1", "Ep1", "Link1", "Int1", "twoa1", "twoa4", "twoa2"],
["Prot1", "Prot3", "Sig1", "Ep1", "Link1", "Int1", "twoa1", "twoa4", "twoa2"],
]
sequences = encode_design(SCHEMA, design)
actual = [c(SCHEMA, sequences).tolist() for c in CONSTRAINTS]
assert actual[0] == [True, False, True, True, True, True, True]
assert actual[1] == [True, True, False, True, True, True, True]
assert actual[2] == [True, True, True, False, True, True, True]
assert actual[3] == [True, True, True, True, False, True, True]
assert actual[4] == [True, True, True, True, True, False, True]
assert actual[5] == [True, True, True, True, True, True, False]
def test_function():
SCHEMA = OrderedDict()
SCHEMA["foo"] = ["a", "b", "c", None]
SCHEMA["bar"] = ["a", "b", None]
CONSTRAINTS = [
Function(lambda x: x.sum(-1) <= 0),
Function(lambda x: x.sum(-1) <= 1),
Function(lambda x: x.sum(-1) <= 2),
Function(lambda x: x.sum(-1) <= 3),
Function(lambda x: x.sum(-1) <= 4),
Function(lambda x: x.sum(-1) <= 5),
]
design: List[List[Optional[str]]] = [
["a", "a"],
["a", "b"],
["a", None],
["b", "a"],
["b", "b"],
["b", None],
["c", "a"],
["c", "b"],
["c", None],
[None, "a"],
[None, "b"],
[None, None],
]
sequences = encode_design(SCHEMA, design)
actual = [c(SCHEMA, sequences).tolist() for c in CONSTRAINTS]
assert stringify(actual[0]) == "100000000000"
assert stringify(actual[1]) == "110100000000"
assert stringify(actual[2]) == "111110100000"
assert stringify(actual[3]) == "111111110100"
assert stringify(actual[4]) == "111111111110"
assert stringify(actual[5]) == "111111111111"
def test_takes_value():
SCHEMA = OrderedDict()
SCHEMA["foo"] = ["a", "b", "c", None]
SCHEMA["bar"] = ["a", "b", None]
CONSTRAINTS = [
TakesValue("foo", "a"),
TakesValue("foo", "b"),
TakesValue("foo", "c"),
TakesValue("foo", None),
TakesValue("bar", "a"),
TakesValue("bar", "b"),
TakesValue("bar", None),
]
design: List[List[Optional[str]]] = [
["a", "a"],
["a", "b"],
["a", None],
["b", "a"],
["b", "b"],
["b", None],
["c", "a"],
["c", "b"],
["c", None],
[None, "a"],
[None, "b"],
[None, None],
]
sequences = encode_design(SCHEMA, design)
actual = [c(SCHEMA, sequences).tolist() for c in CONSTRAINTS]
assert stringify(actual[0]) == "111000000000"
assert stringify(actual[1]) == "000111000000"
assert stringify(actual[2]) == "000000111000"
assert stringify(actual[3]) == "000000000111"
assert stringify(actual[4]) == "100100100100"
assert stringify(actual[5]) == "010010010010"
assert stringify(actual[6]) == "001001001001"
def test_takes_values():
SCHEMA = OrderedDict()
SCHEMA["foo"] = ["a", "b", "c", None]
SCHEMA["bar"] = ["a", "b", None]
CONSTRAINTS = [
TakesValues("foo", "a"),
TakesValues("foo", "b", "c"),
TakesValues("foo", "a", None),
TakesValues("bar", "a", "b", None),
TakesValues("bar", "b"),
TakesValues("bar"),
]
design: List[List[Optional[str]]] = [
["a", "a"],
["a", "b"],
["a", None],
["b", "a"],
["b", "b"],
["b", None],
["c", "a"],
["c", "b"],
["c", None],
[None, "a"],
[None, "b"],
[None, None],
]
sequences = encode_design(SCHEMA, design)
actual = [c(SCHEMA, sequences).tolist() for c in CONSTRAINTS]
assert stringify(actual[0]) == "111000000000"
assert stringify(actual[1]) == "000111111000"
assert stringify(actual[2]) == "111000000111"
assert stringify(actual[3]) == "111111111111"
assert stringify(actual[4]) == "010010010010"
assert stringify(actual[5]) == "000000000000"
def test_logic():
SCHEMA = OrderedDict()
SCHEMA["foo"] = ["a", None]
SCHEMA["bar"] = ["a", None]
foo = TakesValue("foo", "a")
bar = TakesValue("bar", "a")
CONSTRAINTS = [
foo,
bar,
Not(foo),
Not(bar),
And(foo, bar),
Or(foo, bar),
Xor(foo, bar),
Iff(foo, bar),
IfThen(foo, bar),
]
design: List[List[Optional[str]]] = [
["a", "a"],
["a", None],
[None, "a"],
[None, None],
]
sequences = encode_design(SCHEMA, design)
actual = [c(SCHEMA, sequences).tolist() for c in CONSTRAINTS]
assert stringify(actual[0]) == "1100"
assert stringify(actual[1]) == "1010"
assert stringify(actual[2]) == "0011"
assert stringify(actual[3]) == "0101"
assert stringify(actual[4]) == "1000"
assert stringify(actual[5]) == "1110"
assert stringify(actual[6]) == "0110"
assert stringify(actual[7]) == "1001"
assert stringify(actual[8]) == "1011"
| 31.175676 | 86 | 0.527669 |
a54cc4f81911648f430ffce3330e19c1d8a1d872 | 1,283 | py | Python | roulette.py | rivcah/shecodes_python | 8d907833e5793e0362df1207a1515f590c9a0642 | ["Unlicense"] | null | null | null | roulette.py | rivcah/shecodes_python | 8d907833e5793e0362df1207a1515f590c9a0642 | ["Unlicense"] | null | null | null | roulette.py | rivcah/shecodes_python | 8d907833e5793e0362df1207a1515f590c9a0642 | ["Unlicense"] | null | null | null |
##These functions were written as an exercise for the SheCodes' Python track.
##The first function uses Python's random library.
def roulette():
import random as r ##I import the random library.
    ##I chose 'import ... as ...' because you must reference the module
    ##before calling its functions, and the short alias r keeps those
    ##calls (r.randint) brief.
    number = r.randint(0, 36) ##A random integer between 0 and 36 (inclusive) is chosen, matching the 0-36 roulette wheel
return(number) ##The function returns the number randomly chosen
##by the function.
def choose():
number = roulette() ##It uses the previous function in order to choose
##a random integer
chosen = int(input("Choose a number between 0 and 36:\n"))
##asks for the input of the user
if chosen > number:
##compares the number given by the user to the random number.
##The user lost.
print("You lost! The number you chose is greater than the number the computer chose.")
elif chosen < number:
##The user lost.
print("You lost. The number you chose is lesser than the number the computer chose.")
else:
##The user wins.
print("You won! The numbers are equal.")
return(number)##That line is totally optional.
##You can choose not to show the random number.
| 34.675676 | 88 | 0.694466 |
6be08dcb64cd2360f381e3677b71d7ed1b039993 | 274 | py | Python | draw.py | lemolatoon/CartPoleDQN | c5a2d0fb8e14cd45fb8c6f0267d9d16c7c475f14 | ["MIT"] | null | null | null | draw.py | lemolatoon/CartPoleDQN | c5a2d0fb8e14cd45fb8c6f0267d9d16c7c475f14 | ["MIT"] | null | null | null | draw.py | lemolatoon/CartPoleDQN | c5a2d0fb8e14cd45fb8c6f0267d9d16c7c475f14 | ["MIT"] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
rewards = pd.read_pickle("rewards.pkl")
plt.plot(range(len(rewards)), rewards)
plt.plot([0, 400], [195, 195], "--", color="darkred")
plt.xlabel("episodes")
plt.ylabel("Total Reward")
plt.savefig("rewards_fig.jpg")
| 30.444444 | 54 | 0.70073 |
b54d9a723933f527d4b5126722997be1cdff1e65 | 24,138 | py | Python | CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task1_eval.py | yolochai/scisumm-corpus | 3aa7f89afbe051d7202575b46e8f7449f7a088b0 | ["CC-BY-4.0"] | 198 | 2015-05-03T06:35:05.000Z | 2022-03-21T19:12:20.000Z | CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task1_eval.py | yolochai/scisumm-corpus | 3aa7f89afbe051d7202575b46e8f7449f7a088b0 | ["CC-BY-4.0"] | 24 | 2016-03-05T17:28:14.000Z | 2021-07-28T08:14:57.000Z | CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task1_eval.py | yolochai/scisumm-corpus | 3aa7f89afbe051d7202575b46e8f7449f7a088b0 | ["CC-BY-4.0"] | 89 | 2015-04-01T14:19:19.000Z | 2022-03-19T18:47:56.000Z |
import os
import sys
import json, csv
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
from copy import copy
def dictify(r,root=True):
if root:
return {r.tag : dictify(r, False)}
d=copy(r.attrib)
if r.text:
d["_text"]=r.text
for x in r.findall("./*"):
if x.tag not in d:
d[x.tag]=[]
d[x.tag].append(dictify(x,False))
return d
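def _dictify_example():
    # Added illustration (not part of the original evaluation script): dictify()
    # turns an ElementTree node into nested dicts, copying attributes, storing
    # element text under "_text" and grouping child elements by tag into lists.
    root = ET.fromstring('<root><S sid="1">First sentence.</S></root>')
    return dictify(root)
    # -> {'root': {'S': [{'sid': '1', '_text': 'First sentence.'}]}}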
def parse(file):
print("parsing: " + str(file))
parse_data = {}
with open(file, "r") as f:
data = f.read().strip().split("\n")
for line in data:
line = line.strip()
if len(line) == 0:
continue
if line[-1] == "|":
line = line[0:-1]
# print("Old line: " + line)
line = line.replace("a | s, ", "a PIPE s, ")
# print("New line: " + line)
items = line.split(" | ")
line_data = {}
for kvpair in items:
if len(kvpair) == 0:
continue
# print kvpair
key = kvpair.strip().split(":", 1)[0].strip()
value = kvpair.strip().split(":", 1)[1].strip()
# print key + ":" + value
line_data[key] = value
if "Discourse Facet" not in line_data:
line_data["Discourse Facet"] = "None"
line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
print("original cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Citation Marker Offset"].startswith("["):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
if line_data["Citation Marker Offset"].endswith("]"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
if line_data["Citation Marker Offset"].startswith("\'"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
if line_data["Citation Marker Offset"].endswith("\'"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
if line_data["Citation Offset"].startswith("["):
line_data["Citation Offset"] = line_data["Citation Offset"][1:]
if line_data["Citation Offset"].endswith("]"):
line_data["Citation Offset"] = line_data["Citation Offset"][:-1]
print("new cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Reference Article"] not in parse_data:
parse_data[line_data["Reference Article"]] = {}
if line_data["Citing Article"] not in parse_data[line_data["Reference Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]] = {}
if line_data["Citation Marker Offset"] not in parse_data[line_data["Reference Article"]][line_data["Citing Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]] = {"original": line_data, "comparable": False}
ref_offset = line_data["Reference Offset"]
if ref_offset.startswith("["):
ref_offset = ref_offset[1:]
if ref_offset.endswith("]"):
ref_offset = ref_offset[:-1]
parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
print("\n\n")
print(parsed_ref_offset_tmp)
parsed_ref_offset = []
for ref in parsed_ref_offset_tmp:
print(ref)
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_ref_offset.append(ref)
print(parsed_ref_offset)
# print("<root>" + line_data["Reference Text"] + "</root>")
line = "<root>" + line_data["Reference Text"] + "</root>"
# print("Line is:")
# print(line)
line = line.replace("&", "&")
line = str(BeautifulSoup(line, "xml"))
# line = line.replace("<\s>", "</s>")
# print("Line is:")
# print(line)
root = ET.fromstring(line)
ref_text_dict = dictify(root)
# print(ref_text_dict)
ref_text_dict_clean = {}
cnt = 0
for item in ref_text_dict["root"]["S"]:
cnt += 1
ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Text"] = ref_text_dict_clean
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Offset"] = parsed_ref_offset
ref_discourse_facet = line_data["Discourse Facet"]
parsed_discourse_facet = []
if len(ref_discourse_facet) > 0:
if ref_discourse_facet[0] == "[":
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet[1:-1].split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
else:
ref = ref_discourse_facet.lower().replace(" ", "_")
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Discourse Facet"] = parsed_discourse_facet
# print(json.dumps(parse_data, sort_keys=True, indent=4))
# print("###################################################################################################################")
return parse_data
def parse_csv(file):
print("parsing: " + str(file))
parse_data = {}
csv_obj = csv.reader(open(file,"r"))
items_list = None
for i, row in enumerate(csv_obj):
if i==0: # first line
items_list = row # Citance Number,Reference Article, ...
continue
line_data = {}
if len(row) != len(items_list):
print("Error: # of items mismatch")
print(items_list)
print(row)
continue
for key, value in zip(items_list, row):
# print kvpair
line_data[key] = value
if line_data["Reference Text"] == "NA":
continue
# print items_list
print(line_data["Reference Text"])
line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
print("original cit marker offset is " + line_data["Citation Marker Offset"])
# if line_data["Citation Marker Offset"].startswith("["):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
# if line_data["Citation Marker Offset"].endswith("]"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
# if line_data["Citation Marker Offset"].startswith("\'"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
# if line_data["Citation Marker Offset"].endswith("\'"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
# if line_data["Citation Offset"].startswith("["):
# line_data["Citation Offset"] = line_data["Citation Offset"][1:]
# if line_data["Citation Offset"].endswith("]"):
# line_data["Citation Offset"] = line_data["Citation Offset"][:-1]
line_data["Citation Marker Offset"] = '0'
line_data["Citation Offset"] = '0'
print("new cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Reference Article"] not in parse_data:
parse_data[line_data["Reference Article"]] = {}
if line_data["Citing Article"] not in parse_data[line_data["Reference Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]] = {}
if line_data["Citation Marker Offset"] not in parse_data[line_data["Reference Article"]][line_data["Citing Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]] = {"original": line_data, "comparable": False}
ref_offset = line_data["Reference Offset"]
if ref_offset.startswith("["):
ref_offset = ref_offset[1:]
if ref_offset.endswith("]"):
ref_offset = ref_offset[:-1]
parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
print("\n\n")
print(parsed_ref_offset_tmp)
parsed_ref_offset = []
for ref in parsed_ref_offset_tmp:
print(ref)
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_ref_offset.append(ref)
print(parsed_ref_offset)
# print("<root>" + line_data["Reference Text"] + "</root>")
line = "<root>" + line_data["Reference Text"] + "</root>"
# print("Line is:")
# print(line)
line = line.replace("&", "&")
line = line.replace("&", "&")
line = str(BeautifulSoup(line, "xml"))
# line = line.replace("<\s>", "</s>")
# print("Line is:")
# print(line)
root = ET.fromstring(line)
ref_text_dict = dictify(root)
# print(ref_text_dict)
ref_text_dict_clean = {}
cnt = 0
# if "S" not in ref_text_dict["root"]:
# # print "Key Error at", file
# continue
try:
for item in ref_text_dict["root"]["S"]:
cnt += 1
ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Text"] = ref_text_dict_clean
# print "ref_text_dict_clean", ref_text_dict_clean
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Offset"] = parsed_ref_offset
except:
print("Error in Reference Offset")
continue
try:
ref_discourse_facet = line_data["Discourse Facet"]
parsed_discourse_facet = []
if len(ref_discourse_facet) > 0:
if ref_discourse_facet[0] == "[":
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet[1:-1].split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
else:
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet.split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
print("parsed_discourse_facet", parsed_discourse_facet)
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Discourse Facet"] = parsed_discourse_facet
except:
print("Error in Discourse Facet")
continue
# print(json.dumps(parse_data, sort_keys=True, indent=4))
# print("###################################################################################################################")
return parse_data
def calculate(gold_data, submit_data):
# print(json.dumps(gold_data, indent=4, sort_keys=True))
# print(json.dumps(submit_data, indent=4, sort_keys=True))
[TP_ref, FN_ref, FP_ref, TP_facet, FN_facet, FP_facet] = [0, 0, 0, 0, 0, 0]
for ref_article in gold_data:
for cit_article in gold_data[ref_article]:
for cit_marker_offset in gold_data[ref_article][cit_article]:
old_TP_ref = TP_ref
try:
for ref_offset in gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
try:
ref_offset_list = submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
if ref_offset in ref_offset_list:
TP_ref += 1
gold_data[ref_article][cit_article][cit_marker_offset]["comparable"] = True
else:
FN_ref += 1
except KeyError as e:
print("IGNORE THIS: key error 1")
FN_ref += 1
except: continue
for ref_article in submit_data:
for cit_article in submit_data[ref_article]:
for cit_marker_offset in submit_data[ref_article][cit_article]:
try:
for ref_offset in submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
try:
ref_offset_list = gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
if ref_offset not in ref_offset_list:
FP_ref += 1
except KeyError as e:
print("IGNORE THIS: key error 2")
FP_ref += 1
except: continue
[precision_ref, recall_ref, f_ref] = [0.0, 0.0, 0.0]
try:
precision_ref = TP_ref / float(TP_ref + FP_ref)
except ZeroDivisionError as e:
precision_ref = 0
try:
recall_ref = TP_ref / float(TP_ref + FN_ref)
except ZeroDivisionError as e:
recall_ref = 0
try:
f_ref = 2.0 * precision_ref * recall_ref / float(precision_ref + recall_ref)
except ZeroDivisionError as e:
f_ref = 0
for ref_article in gold_data:
for cit_article in gold_data[ref_article]:
for cit_marker_offset in gold_data[ref_article][cit_article]:
try:
for facet in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
print("\n\n")
print(ref_article)
print(cit_article)
print(cit_marker_offset)
print(facet)
print(submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"])
try:
if facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
TP_facet += 1
else:
FN_facet += 1
except KeyError as e:
print("IGNORE THIS: Key error 4")
FN_facet += 1
else:
FN_facet += 1
except: continue
for ref_article in submit_data:
for cit_article in submit_data[ref_article]:
for cit_marker_offset in submit_data[ref_article][cit_article]:
try:
for facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
try:
if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
if facet not in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
FP_facet += 1
except KeyError as e:
print("IGNORE THIS: Key error 5")
FP_facet += 1
except: continue
[precision_facet, recall_facet, f_facet] = [0.0, 0.0, 0.0]
try:
precision_facet = TP_facet / float(TP_facet + FP_facet)
except ZeroDivisionError as e:
precision_facet = 0
try:
recall_facet = TP_facet / float(TP_facet + FN_facet)
except ZeroDivisionError as e:
recall_facet = 0
try:
f_facet = 2.0 * precision_facet * recall_facet / float(precision_facet + recall_facet)
except ZeroDivisionError as e:
f_facet = 0
return (precision_ref, recall_ref, f_ref, precision_facet, recall_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet)
def evaluate(gold_file, submit_file, score_file):
# print(gold_file)
# print(submit_file)
gold_data = parse_csv(gold_file)
submit_data = parse_csv(submit_file)
(p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet) = calculate(gold_data, submit_data)
with open(score_file, "a") as f:
f.write(os.path.basename(gold_file) + "_task1a_precision: " + str(p_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1a_recall: " + str(r_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1a_f1: " + str(f_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_precision: " + str(p_facet) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_recall: " + str(r_facet) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_f1: " + str(f_facet) + "\n")
return (p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet)
def main(input_dir, output_dir):
if not os.path.exists(input_dir):
print("%s not a valid director" % input_dir)
if not os.path.exists(output_dir):
print("%s not a valid director" % output_dir)
truth_dir = os.path.join(input_dir, "ref", "Task1")
if not os.path.exists(truth_dir):
print("%s not a valid director" % truth_dir)
submit_dir = os.path.join(input_dir, "res", "Task1")
if not os.path.exists(submit_dir):
print("%s not a valid director" % submit_dir)
score_file = os.path.join(output_dir, "scores.txt")
if os.path.exists(score_file):
os.remove(score_file)
P_ref_list = []
P_facet_list = []
R_ref_list = []
R_facet_list = []
F_ref_list = []
F_facet_list = []
TP_ref_list = []
FP_ref_list = []
FN_ref_list = []
TP_facet_list = []
FP_facet_list = []
FN_facet_list = []
for gold_file in os.listdir(truth_dir):
print("gold file",gold_file)
if gold_file.startswith('.'):
continue
paper_id = gold_file.split('_')[0]
submit_file = os.path.join(submit_dir, paper_id +".annv3.csv")
print("submit_file",submit_file)
if not os.path.exists(submit_file):
print("No submit file")
continue
(p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet) = evaluate(os.path.join(truth_dir, gold_file), submit_file, score_file)
P_ref_list.append(p_ref)
P_facet_list.append(p_facet)
R_ref_list.append(r_ref)
R_facet_list.append(r_facet)
F_ref_list.append(f_ref)
F_facet_list.append(f_facet)
TP_ref_list.append(TP_ref)
FP_ref_list.append(FP_ref)
FN_ref_list.append(FN_ref)
TP_facet_list.append(TP_facet)
FP_facet_list.append(FP_facet)
FN_facet_list.append(FN_facet)
TP_ref_sum = sum(TP_ref_list)
FP_ref_sum = sum(FP_ref_list)
FN_ref_sum = sum(FN_ref_list)
TP_facet_sum = sum(TP_facet_list)
FP_facet_sum = sum(FP_facet_list)
FN_facet_sum = sum(FN_facet_list)
try:
precision_ref_micro = TP_ref_sum / float(TP_ref_sum + FP_ref_sum)
except ZeroDivisionError as e:
precision_ref_micro = 0
try:
recall_ref_micro = TP_ref_sum / float(TP_ref_sum + FN_ref_sum)
except ZeroDivisionError as e:
recall_ref_micro = 0
try:
f_ref_micro = 2.0 * precision_ref_micro * recall_ref_micro / float(precision_ref_micro + recall_ref_micro)
except ZeroDivisionError as e:
f_ref_micro = 0
try:
precision_ref_macro = sum(P_ref_list) / len(P_ref_list)
except ZeroDivisionError as e:
precision_ref_macro = 0
try:
recall_ref_macro = sum(R_ref_list) / len(R_ref_list)
except ZeroDivisionError as e:
recall_ref_macro = 0
try:
f_ref_macro = 2.0 * precision_ref_macro * recall_ref_macro / float(precision_ref_macro + recall_ref_macro)
except ZeroDivisionError as e:
f_ref_macro = 0
try:
# precision_facet_micro = TP_ref_sum / float(TP_ref_sum + FP_ref_sum)
precision_facet_micro = TP_facet_sum / float(TP_facet_sum + FP_facet_sum)
except ZeroDivisionError as e:
precision_facet_micro = 0
try:
# recall_facet_micro = TP_ref_sum / float(TP_ref_sum + FN_ref_sum)
recall_facet_micro = TP_facet_sum / float(TP_facet_sum + FN_facet_sum)
except ZeroDivisionError as e:
recall_facet_micro = 0
try:
# f_facet_micro = 2.0 * precision_ref_micro * recall_ref_micro / float(precision_ref_micro + recall_ref_micro)
f_facet_micro = 2.0 * precision_facet_micro * recall_facet_micro / float(precision_facet_micro + recall_facet_micro)
except ZeroDivisionError as e:
f_facet_micro = 0
try:
precision_facet_macro = sum(P_facet_list) / len(P_facet_list)
except ZeroDivisionError as e:
precision_facet_macro = 0
try:
recall_facet_macro = sum(R_facet_list) / len(R_facet_list)
except ZeroDivisionError as e:
recall_facet_macro = 0
try:
f_facet_macro = 2.0 * precision_facet_macro * recall_facet_macro / float(precision_facet_macro + recall_facet_macro)
except ZeroDivisionError as e:
f_facet_macro = 0
with open(score_file, "a") as f:
f.write("task1a_precision_micro_avg: " + str(precision_ref_micro) + "\n")
f.write("task1a_precision_macro_avg: " + str(precision_ref_macro) + "\n")
f.write("task1a_recall_micro_avg: " + str(recall_ref_micro) + "\n")
f.write("task1a_recall_macro_avg: " + str(recall_ref_macro) + "\n")
f.write("task1a_f1_micro_avg: " + str(f_ref_micro) + "\n")
f.write("task1a_f1_macro_avg: " + str(f_ref_macro) + "\n")
f.write("task1b_precision_micro_avg: " + str(precision_facet_micro) + "\n")
f.write("task1b_precision_macro_avg: " + str(precision_facet_macro) + "\n")
f.write("task1b_recall_micro_avg: " + str(recall_facet_micro) + "\n")
f.write("task1b_recall_macro_avg: " + str(recall_facet_macro) + "\n")
f.write("task1b_f1_micro_avg: " + str(f_facet_micro) + "\n")
f.write("task1b_f1_macro_avg: " + str(f_facet_macro) + "\n")
if __name__ == "__main__":
input_dir = sys.argv[1]
output_dir = sys.argv[2]
main(input_dir, output_dir)
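# Illustrative sketch (not part of the original script): the micro/macro averaging done
# in main() in miniature. Micro averaging pools TP/FP/FN counts over all gold files
# before computing one F1; macro averaging computes F1 per file and takes the unweighted
# mean. The per-file counts below are made-up toy numbers.
def _micro_macro_f1_example():
    per_file_counts = [(8, 2, 4), (1, 3, 5)]  # hypothetical (TP, FP, FN) per gold file
    def f1(tp, fp, fn):
        p = tp / float(tp + fp) if (tp + fp) else 0.0
        r = tp / float(tp + fn) if (tp + fn) else 0.0
        return 2.0 * p * r / (p + r) if (p + r) else 0.0
    micro = f1(sum(c[0] for c in per_file_counts),
               sum(c[1] for c in per_file_counts),
               sum(c[2] for c in per_file_counts))
    macro = sum(f1(*c) for c in per_file_counts) / len(per_file_counts)
    return micro, macro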
| 47.422397
| 182
| 0.579335
|
ddd91633678a6ec96de6416dad9a34b839e6b84a
| 10,086
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url_async.py
|
winest/azure-sdk-for-python
|
5557136179aa3df49ab9c4abfd3b6d1a9a38975c
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url_async.py
|
winest/azure-sdk-for-python
|
5557136179aa3df49ab9c4abfd3b6d1a9a38975c
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url_async.py
|
winest/azure-sdk-for-python
|
5557136179aa3df49ab9c4abfd3b6d1a9a38975c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from azure.core.exceptions import HttpResponseError, ServiceRequestError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_content_result
from azure.ai.formrecognizer.aio import FormRecognizerClient
from azure.ai.formrecognizer import FormRecognizerApiVersion
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestContentFromUrlAsync(AsyncFormRecognizerTest):
@FormRecognizerPreparer()
async def test_content_url_auth_bad_key(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential("xxxx"))
with self.assertRaises(ClientAuthenticationError):
async with client:
poller = await client.begin_recognize_content_from_url(self.invoice_url_pdf)
result = await poller.result()
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_url_pass_stream(self, client):
with open(self.receipt_jpg, "rb") as fd:
receipt = fd.read(4) # makes the recording smaller
with self.assertRaises(HttpResponseError):
async with client:
poller = await client.begin_recognize_content_from_url(receipt)
result = await poller.result()
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_url_transform_pdf(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
async with client:
poller = await client.begin_recognize_content_from_url(self.invoice_url_pdf, cls=callback)
result = await poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_url_pdf(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.invoice_url_pdf)
result = await poller.result()
self.assertEqual(len(result), 1)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertFormPagesHasValues(result)
self.assertEqual(layout.tables[0].row_count, 3)
self.assertEqual(layout.tables[0].column_count, 5)
self.assertEqual(layout.tables[0].page_number, 1)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_multipage_url(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf)
result = await poller.result()
self.assertEqual(len(result), 3)
self.assertFormPagesHasValues(result)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_multipage_transform_url(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
async with client:
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf, cls=callback)
result = await poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_continuation_token(self, client):
async with client:
initial_poller = await client.begin_recognize_content_from_url(self.form_url_jpg)
cont_token = initial_poller.continuation_token()
poller = await client.begin_recognize_content_from_url(None, continuation_token=cont_token)
result = await poller.result()
self.assertIsNotNone(result)
await initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_multipage_table_span_pdf(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.multipage_table_url_pdf)
result = await poller.result()
self.assertEqual(len(result), 2)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertEqual(len(layout.tables), 2)
self.assertEqual(layout.tables[0].row_count, 29)
self.assertEqual(layout.tables[0].column_count, 4)
self.assertEqual(layout.tables[0].page_number, 1)
self.assertEqual(layout.tables[1].row_count, 6)
self.assertEqual(layout.tables[1].column_count, 5)
self.assertEqual(layout.tables[1].page_number, 1)
layout = result[1]
self.assertEqual(len(layout.tables), 1)
self.assertEqual(layout.page_number, 2)
self.assertEqual(layout.tables[0].row_count, 23)
self.assertEqual(layout.tables[0].column_count, 5)
self.assertEqual(layout.tables[0].page_number, 2)
self.assertFormPagesHasValues(result)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_selection_marks(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(form_url=self.selection_mark_url_pdf)
result = await poller.result()
self.assertEqual(len(result), 1)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertFormPagesHasValues(result)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
async def test_content_selection_marks_v2(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(form_url=self.selection_mark_url_pdf)
result = await poller.result()
self.assertEqual(len(result), 1)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertFormPagesHasValues(result)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_specify_pages(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1"])
result = await poller.result()
assert len(result) == 1
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1", "3"])
result = await poller.result()
assert len(result) == 2
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1-2"])
result = await poller.result()
assert len(result) == 2
poller = await client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1-2", "3"])
result = await poller.result()
assert len(result) == 3
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_reading_order(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.form_url_jpg, reading_order="natural")
assert 'natural' == poller._polling_method._initial_response.http_response.request.query['readingOrder']
result = await poller.result()
assert result
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_language_specified(self, client):
async with client:
poller = await client.begin_recognize_content_from_url(self.form_url_jpg, language="de")
assert 'de' == poller._polling_method._initial_response.http_response.request.query['language']
result = await poller.result()
assert result
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_content_language_error(self, client):
async with client:
with pytest.raises(HttpResponseError) as e:
await client.begin_recognize_content_from_url(self.form_url_jpg, language="not a language")
assert "NotSupportedLanguage" == e.value.error.code
@FormRecognizerPreparer()
@FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
async def test_content_language_v2(self, client):
async with client:
with pytest.raises(ValueError) as e:
await client.begin_recognize_content_from_url(self.form_url_jpg, language="en")
assert "'language' is only available for API version V2_1 and up" in str(e.value)
| 45.026786
| 116
| 0.705632
|
09250aa318544ad98c244eff2e3eb954ab131659
| 13,331
|
py
|
Python
|
stringlifier/modules/stringc2.py
|
atreyamaj/stringlifier
|
97bc8bd01cbb87ec910112809d2a65d73f57d2fc
|
[
"ECL-2.0",
"Apache-2.0"
] | 130
|
2020-08-20T17:57:43.000Z
|
2022-03-11T07:18:43.000Z
|
stringlifier/modules/stringc2.py
|
atreyamaj/stringlifier
|
97bc8bd01cbb87ec910112809d2a65d73f57d2fc
|
[
"ECL-2.0",
"Apache-2.0"
] | 14
|
2020-08-26T06:18:13.000Z
|
2021-08-03T01:52:59.000Z
|
stringlifier/modules/stringc2.py
|
atreyamaj/stringlifier
|
97bc8bd01cbb87ec910112809d2a65d73f57d2fc
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2020-08-24T17:43:24.000Z
|
2021-11-04T08:42:36.000Z
|
#
# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
import optparse
import sys
import json
import numpy as np
import random
import tqdm
class Encodings:
def __init__(self, filename=None):
self._char2int = {'<PAD>': 0, '<UNK>': 1}
self._label2int = {'<PAD>': 0}
self._label_list = ['<PAD>']
if filename is not None:
self.load(filename)
def save(self, filename):
json.dump({'char2int': self._char2int, 'label2int': self._label2int},
open(filename, 'w'))
def load(self, file):
if isinstance(file, str):
stream = open(file, 'r')
else:
stream = file
obj = json.load(stream)
self._char2int = obj['char2int']
self._label2int = obj['label2int']
self._label_list = [None for _ in range(len(self._label2int))]
for t in self._label2int:
self._label_list[self._label2int[t]] = t
def update_encodings(self, dataset, cutoff=2):
char2count = {}
for entry in tqdm.tqdm(dataset):
text = entry[0]
label = entry[1]
for char in text:
char = char.lower()
if char in char2count:
char2count[char] += 1
else:
char2count[char] = 1
for ttype in label:
if ttype not in self._label2int:
self._label2int[ttype] = len(self._label2int)
self._label_list.append(ttype)
for char in char2count:
if char not in self._char2int and char2count[char] > cutoff:
self._char2int[char] = len(self._char2int)
class CTaggerConfig:
def __init__(self):
self.char_emb_size = 100
self.rnn_layers = 2
self.rnn_size = 100
self.hidden = 500
def save(self, filename):
json.dump({'char_emb_size': self.char_emb_size, 'rnn_layers': self.rnn_layers, 'rnn_size': self.rnn_size,
'hidden': self.hidden},
open(filename, 'w'))
def load(self, file):
if isinstance(file, str):
stream = open(file, 'r')
else:
stream = file
obj = json.load(stream)
self.char_emb_size = obj['char_emb_size']
self.rnn_size = obj['rnn_size']
self.rnn_layers = obj['rnn_layers']
self.hidden = obj['hidden']
class CTagger(nn.Module):
def __init__(self, config, encodings):
super(CTagger, self).__init__()
self._config = config
self._encodings = encodings
self._char_emb = nn.Embedding(len(encodings._char2int), config.char_emb_size, padding_idx=0)
self._case_emb = nn.Embedding(4, 16, padding_idx=0)
self._rnn = nn.LSTM(config.char_emb_size + 16, config.rnn_size, config.rnn_layers, batch_first=True,
bidirectional=True)
self._hidden = nn.Sequential(nn.Linear(config.rnn_size * 2, config.hidden), nn.Tanh(), nn.Dropout(0.5))
self._softmax_type = nn.Linear(config.hidden, len(encodings._label2int))
def _make_input(self, word_list):
        # pad each string to the longest in the batch and build character/case index matrices for the LSTM
max_seq_len = max([len(word) for word in word_list])
x_char = np.zeros((len(word_list), max_seq_len))
x_case = np.zeros((len(word_list), max_seq_len))
for iBatch in range(x_char.shape[0]):
word = word_list[iBatch]
for index in range(len(word)):
char = word[index]
case_idx = 0
if char.lower() == char.upper():
case_idx = 1 # symbol
elif char.lower() != char:
case_idx = 2 # uppercase
else:
case_idx = 3 # lowercase
char = char.lower()
if char in self._encodings._char2int:
char_idx = self._encodings._char2int[char]
else:
char_idx = 1 # UNK
x_char[iBatch, index] = char_idx
x_case[iBatch, index] = case_idx
return x_char, x_case
def forward(self, string_list):
x_char, x_case = self._make_input(string_list)
x_char = torch.tensor(x_char, dtype=torch.long, device=self._get_device())
x_case = torch.tensor(x_case, dtype=torch.long, device=self._get_device())
hidden = torch.cat([self._char_emb(x_char), self._case_emb(x_case)], dim=-1)
hidden = torch.dropout(hidden, 0.5, self.training)
output, _ = self._rnn(hidden)
hidden = self._hidden(output)
return self._softmax_type(hidden)
def save(self, path):
torch.save(self.state_dict(), path)
def load(self, path):
self.load_state_dict(torch.load(path, map_location='cpu'))
def _get_device(self):
if self._char_emb.weight.device.type == 'cpu':
return 'cpu'
return '{0}:{1}'.format(self._char_emb.weight.device.type, str(self._char_emb.weight.device.index))
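# Illustrative sketch (not part of the original module): how _make_input encodes the
# per-character case channel (1 = symbol, 2 = uppercase, 3 = lowercase) alongside the
# lowercased character index, using only the classes defined above. The toy training
# entry ("Ab1", "AAA") is made up.
def _example_make_input():
    enc = Encodings()
    enc.update_encodings([("Ab1", "AAA")], cutoff=0)
    model = CTagger(CTaggerConfig(), enc)
    x_char, x_case = model._make_input(["Ab1"])
    # x_case == [[2., 3., 1.]]: 'A' is uppercase, 'b' lowercase, '1' a symbol
    return x_char, x_case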
def _load_dataset(filename):
lines = open(filename, encoding='utf-8').readlines()
dataset = []
for ii in range(len(lines) // 2):
string = lines[ii * 2][:-1]
mask = lines[ii * 2 + 1][:-1]
dataset.append((string, mask))
return dataset
def _eval(model, dataset, encodings):
model.eval()
test_x, test_y = _make_batches(dataset, batch_size=128)
total_t = 0
ok_t = 0
with torch.no_grad():
pgb = tqdm.tqdm(zip(test_x, test_y), total=len(test_x), ncols=80, desc='\t\t\t\t')
for x, y in pgb:
y_pred_t = model(x)
y_tar_t = _get_targets(y, encodings).reshape(-1)
y_pred_t = torch.argmax(y_pred_t, dim=-1).detach().cpu().numpy().reshape(-1)
for y_t_t, y_p_t in zip(y_tar_t, y_pred_t):
if y_t_t != 0:
total_t += 1
if y_t_t == y_p_t:
ok_t += 1
return ok_t / total_t
def _make_batches(dataset, batch_size=32):
batches_x = []
batches_y = []
batch_x = []
batch_y = []
for entry in dataset:
domain = entry[0]
t = entry[1]
batch_x.append(domain)
batch_y.append(t)
if len(batch_x) == batch_size:
batches_x.append(batch_x)
batches_y.append(batch_y)
batch_x = []
batch_y = []
if len(batch_x) != 0:
batches_x.append(batch_x)
batches_y.append(batch_y)
return batches_x, batches_y
def _get_targets(y, encodings):
max_len = max([len(yy) for yy in y])
y_t = np.zeros((len(y), max_len), dtype=np.long)
for i in range(len(y)):
for j in range(max_len):
if j < len(y[i]):
y_t[i, j] = encodings._label2int[y[i][j]]
return y_t
def _generate_dataset(count):
from training import generate_next_cmd
dataset = []
for ii in range(count):
cmd, mask = generate_next_cmd()
dataset.append((cmd, mask))
return dataset
def _start_train(params):
eval_at = 5000
if params.resume:
encodings = Encodings('{0}.encodings'.format(params.output_base))
else:
sys.stdout.write('Generating new random data...')
sys.stdout.flush()
trainset = _generate_dataset(int(eval_at * 4 * params.batch_size))
sys.stdout.write('done\n')
encodings = Encodings()
encodings.update_encodings(trainset)
print('chars={0}, types={1}'.format(len(encodings._char2int), len(encodings._label2int)))
print(encodings._label2int)
config = CTaggerConfig()
if params.resume:
config.load('{0}.conf'.format(params.output_base))
model = CTagger(config, encodings)
model.to(params.device)
if params.resume:
model.load('{0}.last'.format(params.output_base))
optimizer = torch.optim.Adam(model.parameters())
criterion_t = torch.nn.CrossEntropyLoss(ignore_index=0)
patience_left = params.patience
best_type = 0 # _eval(model, devset, encodings)
encodings.save('{0}.encodings'.format(params.output_base))
config.save('{0}.conf'.format(params.output_base))
model.save('{0}.last'.format(params.output_base))
print("Deveset evaluation acc={0}".format(best_type))
epoch = 0
eval_at = 5000
while patience_left > 0:
sys.stdout.write('Generating new random data...')
sys.stdout.flush()
trainset = _generate_dataset(int(eval_at * params.batch_size))
devset = _generate_dataset(int(eval_at / 10 * params.batch_size))
sys.stdout.write('done\n')
sys.stdout.flush()
sys.stderr.flush()
epoch += 1
random.shuffle(trainset)
train_x, train_y = _make_batches(trainset, batch_size=params.batch_size)
sys.stdout.write('Starting epoch {0}\n'.format(epoch))
pgb = tqdm.tqdm(zip(train_x, train_y), total=len(train_x), ncols=80, desc='\tloss=N/A')
model.train()
total_loss = 0
cnt = 0
for x, y in pgb:
cnt += 1
if cnt % eval_at == 0:
patience_left -= 1
sys.stderr.flush()
sys.stderr.flush()
sys.stderr.write('\n\tEvaluating...')
sys.stderr.flush()
acc_t = _eval(model, devset, encodings)
sys.stderr.write(' acc={0}\n'.format(acc_t))
sys.stderr.flush()
filename = '{0}.last'.format(params.output_base)
sys.stderr.write('\t\tStoring {0}\n'.format(filename))
sys.stderr.flush()
model.save(filename)
if acc_t > best_type:
patience_left = params.patience
best_type = acc_t
filename = '{0}.bestType'.format(params.output_base)
sys.stderr.write('\t\tStoring {0}\n'.format(filename))
sys.stderr.flush()
model.save(filename)
sys.stderr.write('\n')
sys.stderr.flush()
model.train()
if patience_left <= 0:
print("Stopping with maximum patience reached")
sys.exit(0)
y_pred_t = model(x)
y_tar_t = _get_targets(y, encodings)
y_tar_t = torch.tensor(y_tar_t, dtype=torch.long, device=params.device)
y_pred = y_pred_t.view(-1, y_pred_t.shape[-1])
y_target = y_tar_t.view(-1)
if y_pred.shape[0] != y_target.shape[0]:
from ipdb import set_trace
set_trace()
loss = criterion_t(y_pred, y_target)
optimizer.zero_grad()
total_loss += loss.item()
pgb.set_description('\tloss={0:.4f}'.format(total_loss / cnt))
loss.backward()
optimizer.step()
sys.stdout.write('AVG train loss={0} \n'.format(total_loss / len(train_x)))
def _start_interactive(params):
encodings = Encodings('{0}.encodings'.format(params.output_base))
config = CTaggerConfig()
config.load('{0}.conf'.format(params.output_base))
model = CTagger(config, encodings)
model.load('{0}.bestType'.format(params.output_base))
model.to(params.device)
model.eval()
sys.stdout.write('>>> ')
sys.stdout.flush()
string = input()
while string != '/exit':
p_t = model([string])
p_d_t = torch.argmax(p_t, dim=-1).detach().cpu().numpy()
print("Results for \n{0}".format(string))
for ii in range(p_d_t.shape[-1]):
sys.stdout.write(encodings._label_list[p_d_t[0, ii]])
sys.stdout.write('\n')
print("")
sys.stdout.write('>>> ')
sys.stdout.flush()
string = input()
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--interactive', action='store_true', dest='interactive')
parser.add_option('--train', action='store_true', dest='train')
parser.add_option('--resume', action='store_true', dest='resume')
parser.add_option('--store', action='store', dest='output_base')
parser.add_option('--patience', action='store', dest='patience', type='int', default=20, help='(default=20)')
parser.add_option('--batch-size', action='store', dest='batch_size', default=32, type='int', help='(default=32)')
parser.add_option('--device', action='store', dest='device', default='cpu')
(params, _) = parser.parse_args(sys.argv)
if params.train:
_start_train(params)
elif params.interactive:
_start_interactive(params)
else:
parser.print_help()
| 34.716146
| 117
| 0.586378
|
547b053d3833ccfa4e0dd3740fa460f6665dcaa6
| 1,037
|
py
|
Python
|
attribute_filter/base.py
|
DragonDriver/ann-benchmarks
|
25be1cf05e7513b3a89c3bca11184629457e60bc
|
[
"MIT"
] | null | null | null |
attribute_filter/base.py
|
DragonDriver/ann-benchmarks
|
25be1cf05e7513b3a89c3bca11184629457e60bc
|
[
"MIT"
] | null | null | null |
attribute_filter/base.py
|
DragonDriver/ann-benchmarks
|
25be1cf05e7513b3a89c3bca11184629457e60bc
|
[
"MIT"
] | 2
|
2020-08-15T08:51:26.000Z
|
2020-09-27T06:45:34.000Z
|
from __future__ import absolute_import
import psutil
class BaseANN(object):
def done(self):
pass
def get_memory_usage(self):
"""Return the current memory usage of this algorithm instance
(in kilobytes), or None if this information is not available."""
# return in kB for backwards compatibility
return psutil.Process().memory_info().rss / 1024
def support_batch_fit(self):
return False
def already_fit(self, total_num):
return False
def batch_fit(self, X, total_num):
pass
def fit(self, X):
pass
def query(self, q, n):
return [] # array of candidate indices
def handle_query_list_result(self, query_list):
return 0, query_list
def batch_query(self, X, n):
self.res = []
for q in X:
self.res.append(self.query(q, n))
def get_batch_results(self):
return self.res
def get_additional(self):
return {}
def __str__(self):
return self.name
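# Illustrative sketch (hypothetical subclass, not part of the original module): a tiny
# brute-force algorithm showing which BaseANN hooks a concrete implementation usually
# overrides -- fit(), query() and the name used by __str__().
class BruteForceExample(BaseANN):
    def __init__(self):
        self.name = 'BruteForceExample'
        self._data = []
    def fit(self, X):
        self._data = list(X)  # keep the raw vectors; no index structure
    def query(self, q, n):
        # n nearest neighbours by squared Euclidean distance
        dists = [(sum((a - b) ** 2 for a, b in zip(v, q)), i)
                 for i, v in enumerate(self._data)]
        return [i for _, i in sorted(dists)[:n]]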
| 21.604167
| 72
| 0.619094
|
ab09a251bcf209d7678219416f7b846df4ee6782
| 923
|
py
|
Python
|
test/views.py
|
seanchon/django-health-monitor
|
ac53757d29b161bdb4bc5ff7db0fe61323a27e94
|
[
"Apache-2.0"
] | 6
|
2017-04-27T19:27:49.000Z
|
2020-06-04T05:46:53.000Z
|
test/views.py
|
seanchon/django-health-monitor
|
ac53757d29b161bdb4bc5ff7db0fe61323a27e94
|
[
"Apache-2.0"
] | 6
|
2017-06-05T21:00:20.000Z
|
2020-06-30T03:49:00.000Z
|
test/views.py
|
seanchon/django-health-monitor
|
ac53757d29b161bdb4bc5ff7db0fe61323a27e94
|
[
"Apache-2.0"
] | 3
|
2017-12-07T01:53:41.000Z
|
2019-12-04T03:43:02.000Z
|
"""
Copyright 2017 Gracenote
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from health_monitor.views import HealthTestView, HealthAlarmView, HealthView
from test.models import BodyHealth, BodyHealthAlarm
class BodyHealthView(HealthView):
health_model = BodyHealth
class BodyHealthAlarmView(HealthAlarmView):
health_alarm_model = BodyHealthAlarm
class BodyHealthTestView(HealthTestView):
pass
| 30.766667
| 76
| 0.774648
|
552255b693ceb03c9a00e98f5f5da8d8bacaca73
| 10,322
|
py
|
Python
|
discpy/ui/button.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
discpy/ui/button.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
discpy/ui/button.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2021 The DiscPy Developers
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Callable, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
import inspect
import os
from .item import Item, ItemCallbackType
from ..enums import ButtonStyle, ComponentType
from ..partial_emoji import PartialEmoji, _EmojiTag
from ..components import Button as ButtonComponent
__all__ = (
"Button",
"button",
)
if TYPE_CHECKING:
from .view import View
from ..emoji import Emoji
B = TypeVar("B", bound="Button")
V = TypeVar("V", bound="View", covariant=True)
class Button(Item[V]):
"""Represents a UI button.
.. versionadded:: 2.0
Parameters
------------
style: :class:`discpy.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[Union[:class:`.PartialEmoji`, :class:`.Emoji`, :class:`str`]]
The emoji of the button, if available.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
__item_repr_attributes__: Tuple[str, ...] = (
"style",
"url",
"disabled",
"label",
"emoji",
"row",
)
def __init__(
self,
*,
style: ButtonStyle = ButtonStyle.secondary,
label: Optional[str] = None,
disabled: bool = False,
custom_id: Optional[str] = None,
url: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
):
super().__init__()
if custom_id is not None and url is not None:
raise TypeError("cannot mix both url and custom_id with Button")
self._provided_custom_id = custom_id is not None
if url is None and custom_id is None:
custom_id = os.urandom(16).hex()
if url is not None:
style = ButtonStyle.link
if emoji is not None:
if isinstance(emoji, str):
emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
emoji = emoji._to_partial()
else:
raise TypeError(
f"expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}"
)
self._underlying = ButtonComponent._raw_construct(
type=ComponentType.button,
custom_id=custom_id,
url=url,
disabled=disabled,
label=label,
style=style,
emoji=emoji,
)
self.row = row
@property
def style(self) -> ButtonStyle:
""":class:`discpy.ButtonStyle`: The style of the button."""
return self._underlying.style
@style.setter
def style(self, value: ButtonStyle):
self._underlying.style = value
@property
def custom_id(self) -> Optional[str]:
"""Optional[:class:`str`]: The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
"""
return self._underlying.custom_id
@custom_id.setter
def custom_id(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError("custom_id must be None or str")
self._underlying.custom_id = value
@property
def url(self) -> Optional[str]:
"""Optional[:class:`str`]: The URL this button sends you to."""
return self._underlying.url
@url.setter
def url(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError("url must be None or str")
self._underlying.url = value
@property
def disabled(self) -> bool:
""":class:`bool`: Whether the button is disabled or not."""
return self._underlying.disabled
@disabled.setter
def disabled(self, value: bool):
self._underlying.disabled = bool(value)
@property
def label(self) -> Optional[str]:
"""Optional[:class:`str`]: The label of the button, if available."""
return self._underlying.label
@label.setter
def label(self, value: Optional[str]):
self._underlying.label = str(value) if value is not None else value
@property
def emoji(self) -> Optional[PartialEmoji]:
"""Optional[:class:`.PartialEmoji`]: The emoji of the button, if available."""
return self._underlying.emoji
@emoji.setter
def emoji(self, value: Optional[Union[str, Emoji, PartialEmoji]]): # type: ignore
if value is not None:
if isinstance(value, str):
self._underlying.emoji = PartialEmoji.from_str(value)
elif isinstance(value, _EmojiTag):
self._underlying.emoji = value._to_partial()
else:
raise TypeError(
f"expected str, Emoji, or PartialEmoji, received {value.__class__} instead"
)
else:
self._underlying.emoji = None
@classmethod
def from_component(cls: Type[B], button: ButtonComponent) -> B:
return cls(
style=button.style,
label=button.label,
disabled=button.disabled,
custom_id=button.custom_id,
url=button.url,
emoji=button.emoji,
row=None,
)
@property
def type(self) -> ComponentType:
return self._underlying.type
def to_component_dict(self):
return self._underlying.to_dict()
def is_dispatchable(self) -> bool:
return self.custom_id is not None
def is_persistent(self) -> bool:
if self.style is ButtonStyle.link:
return self.url is not None
return super().is_persistent()
def refresh_component(self, button: ButtonComponent) -> None:
self._underlying = button
def button(
*,
label: Optional[str] = None,
custom_id: Optional[str] = None,
disabled: bool = False,
style: ButtonStyle = ButtonStyle.secondary,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
) -> Callable[[ItemCallbackType], ItemCallbackType]:
"""A decorator that attaches a button to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`discpy.ui.View`, the :class:`discpy.ui.Button` being pressed and
the :class:`discpy.Interaction` you receive.
.. note::
Buttons with a URL cannot be created with this function.
Consider creating a :class:`Button` manually instead.
This is because buttons with a URL do not have a callback
associated with them since Discord does not do any processing
with it.
Parameters
------------
label: Optional[:class:`str`]
The label of the button, if any.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
style: :class:`.ButtonStyle`
The style of the button. Defaults to :attr:`.ButtonStyle.grey`.
disabled: :class:`bool`
Whether the button is disabled or not. Defaults to ``False``.
emoji: Optional[Union[:class:`str`, :class:`.Emoji`, :class:`.PartialEmoji`]]
The emoji of the button. This can be in string form or a :class:`.PartialEmoji`
or a full :class:`.Emoji`.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
def decorator(func: ItemCallbackType) -> ItemCallbackType:
if not inspect.iscoroutinefunction(func):
raise TypeError("button function must be a coroutine function")
func.__discpy_ui_model_type__ = Button
func.__discpy_ui_model_kwargs__ = {
"style": style,
"custom_id": custom_id,
"url": None,
"disabled": disabled,
"label": label,
"emoji": emoji,
"row": row,
}
return func
return decorator
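# Illustrative sketch (not part of the original module): a View subclass wiring a callback
# through the decorator above, using the parameter order its docstring documents
# (self, button, interaction). The label text and callback body are illustrative only.
def _example_view():
    from .view import View  # imported at runtime; above it is imported for type checking only
    class ConfirmExample(View):
        @button(label="Confirm", style=ButtonStyle.primary)
        async def confirm(self, button: Button, interaction):
            await interaction.response.send_message("Confirmed")
    return ConfirmExample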
| 34.871622
| 97
| 0.63912
|
ff3b5c0cd6d7164031311a083b869861ccfa41d5
| 96
|
py
|
Python
|
ml_toolkit/webtools/__init__.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | 4
|
2021-01-05T14:03:54.000Z
|
2021-01-29T14:48:09.000Z
|
ml_toolkit/webtools/__init__.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | null | null | null |
ml_toolkit/webtools/__init__.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | null | null | null |
from .url_parsers import LinkParser, parse_url_page
__all__ = ['LinkParser', 'parse_url_page']
| 24
| 51
| 0.791667
|
ec660d0667b2243a875f9bb51f41e6a814730179
| 2,497
|
py
|
Python
|
portional-web/recipe/tests/conftest.py
|
davidhousedev/portional
|
b9a8efab0554d6b1c19aa8b510f3f5a45c91986f
|
[
"Apache-2.0"
] | null | null | null |
portional-web/recipe/tests/conftest.py
|
davidhousedev/portional
|
b9a8efab0554d6b1c19aa8b510f3f5a45c91986f
|
[
"Apache-2.0"
] | 5
|
2021-03-08T23:42:47.000Z
|
2022-02-26T08:29:09.000Z
|
portional-web/recipe/tests/conftest.py
|
davidhousedev/portional
|
b9a8efab0554d6b1c19aa8b510f3f5a45c91986f
|
[
"Apache-2.0"
] | 1
|
2019-01-30T19:09:19.000Z
|
2019-01-30T19:09:19.000Z
|
import pytest
from django.contrib.auth.models import User
from django.conf import settings
from ..models import (
Recipe, RecipeIngredient, Ingredient, Instruction
)
BEEF_KEY = 'beef'
BUN_KEY = 'bun'
INGREDIENT_MAP = {
BEEF_KEY: 'Beef',
BUN_KEY: 'Bun',
}
@pytest.fixture
def ingredients(db):
ings = {
ing_key: Ingredient(orig_name=ing_name)
for ing_key, ing_name in INGREDIENT_MAP.items()
}
for ing in ings.values():
ing.save()
return ings
@pytest.fixture
def recipe(db):
user = User()
user.save()
rec = Recipe(name='Hamburger',
imported_by=user)
rec.save()
return rec
@pytest.fixture
def recipe_ingredients(db, recipe, ingredients):
beef_rec_ing = RecipeIngredient(
ingredient=ingredients[BEEF_KEY],
amount=0.5,
scale='lbs',
recipe=recipe)
bun_rec_ing = RecipeIngredient(
ingredient=ingredients[BUN_KEY],
amount=1,
recipe=recipe)
beef_rec_ing.save()
bun_rec_ing.save()
return {
BEEF_KEY: beef_rec_ing,
BUN_KEY: bun_rec_ing
}
@pytest.fixture
def instructions(db, recipe, recipe_ingredients):
beef_db_id = f'{settings.DB_ID_PREFIX}{recipe_ingredients[BEEF_KEY].id}'
bun_db_id = f'{settings.DB_ID_PREFIX}{recipe_ingredients[BUN_KEY].id}'
inst_1 = Instruction(
orig_text='Grill burger to taste.',
db_id_text=f'Grill {{{beef_db_id}}} '
f'to taste.',
order=1,
recipe=recipe)
inst_2 = Instruction(
orig_text='Place burger within toasted bun.',
db_id_text=f'Place {{{beef_db_id}}} '
f'within toasted '
f'{{{bun_db_id}}}.',
order=2,
recipe=recipe)
inst_3 = Instruction(
orig_text='Serve immediately with your choice of '
'condiments.',
db_id_text=f'Serve immediately with your choice of '
'condiments.',
order=3,
recipe=recipe)
insts = [inst_1, inst_2, inst_3]
for inst in insts:
inst.save()
inst_1.ingredients = (recipe_ingredients[BEEF_KEY],)
inst_2.ingredients = (
recipe_ingredients[BEEF_KEY],
recipe_ingredients[BUN_KEY],
)
for inst in insts:
inst.save()
return insts
@pytest.fixture
def hamburger_recipe(db, recipe, instructions):
"""Ensure that all db objects are created prior to beginning tests."""
return recipe
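# Illustrative sketch (hypothetical test, not part of the original conftest): how a test
# would consume the fixture chain above. pytest resolves hamburger_recipe -> instructions ->
# recipe_ingredients -> ingredients/recipe automatically before the test body runs.
def _example_hamburger_recipe_test(hamburger_recipe):
    assert hamburger_recipe.name == 'Hamburger'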
| 23.336449
| 76
| 0.619543
|
9769ba4e6bc1d11779ca221b5221a9bcced05722
| 1,491
|
py
|
Python
|
assignments/assignment1/metrics.py
|
iptkachev/dlcourse_ai
|
bf4d8c7639fa0597580130ae65e1dea0f26d1b4a
|
[
"MIT"
] | null | null | null |
assignments/assignment1/metrics.py
|
iptkachev/dlcourse_ai
|
bf4d8c7639fa0597580130ae65e1dea0f26d1b4a
|
[
"MIT"
] | null | null | null |
assignments/assignment1/metrics.py
|
iptkachev/dlcourse_ai
|
bf4d8c7639fa0597580130ae65e1dea0f26d1b4a
|
[
"MIT"
] | null | null | null |
def binary_classification_metrics(prediction, ground_truth):
'''
Computes metrics for binary classification
Arguments:
prediction, np array of bool (num_samples) - model predictions
ground_truth, np array of bool (num_samples) - true labels
Returns:
precision, recall, f1, accuracy - classification metrics
'''
precision = 0
recall = 0
accuracy = 0
f1 = 0
# TODO: implement metrics!
# Some helpful links:
# https://en.wikipedia.org/wiki/Precision_and_recall
# https://en.wikipedia.org/wiki/F1_score
count_objects = ground_truth.shape[0]
tp = 0
for i in range(prediction.shape[0]):
if prediction[i]:
tp += (prediction[i] == ground_truth[i])
recall = tp / ground_truth.sum()
precision = tp / prediction.sum()
f1 = 2 * precision * recall / (precision + recall)
accuracy = (prediction == ground_truth).sum() / count_objects
return precision, recall, f1, accuracy
def multiclass_accuracy(prediction, ground_truth):
'''
Computes metrics for multiclass classification
Arguments:
prediction, np array of int (num_samples) - model predictions
ground_truth, np array of int (num_samples) - true labels
Returns:
accuracy - ratio of accurate predictions to total samples
'''
# TODO: Implement computing accuracy
count_objects = ground_truth.shape[0]
return (prediction == ground_truth).sum() / count_objects
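# Worked example (illustrative, not part of the original assignment file): with the toy
# arrays below there is 1 TP, 1 FP and 1 FN, so precision = recall = f1 = 0.5, and 2 of
# the 4 predictions match the labels, so accuracy = 0.5.
def _example_binary_metrics():
    import numpy as np
    prediction = np.array([True, True, False, False])
    ground_truth = np.array([True, False, True, False])
    return binary_classification_metrics(prediction, ground_truth)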
| 29.235294
| 66
| 0.667337
|
0a7ae6d658a971121b0c2a3f11b5bbd220484bf8
| 467
|
py
|
Python
|
dictionary.py
|
joeysal/astr-119-hw-2
|
cb63f577370e5835c8ae027e624ad2459e71c3cb
|
[
"MIT"
] | null | null | null |
dictionary.py
|
joeysal/astr-119-hw-2
|
cb63f577370e5835c8ae027e624ad2459e71c3cb
|
[
"MIT"
] | 1
|
2020-10-11T23:17:55.000Z
|
2020-10-17T01:03:17.000Z
|
dictionary.py
|
joeysal/astr-119-hw-2
|
cb63f577370e5835c8ae027e624ad2459e71c3cb
|
[
"MIT"
] | null | null | null |
#define a dictionary data structure
#dictionaries have key:value pairs for the elements
example_dict = {
"class" : "ASTR 119",
"prof" : "Brant",
"awesomeness" : 10
}
print ("The type of example_dict is ", type(example_dict))
#get value via key
course = example_dict["class"]
print(course)
example_dict["awesomeness"] += 1
#print the dictionary
print(example_dict)
#print dictionary element by element
for x in example_dict.keys():
print(x, example_dict[x])
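#the same loop via items(), which yields each key and value together
for k, v in example_dict.items():
    print(k, v)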
| 19.458333
| 58
| 0.732334
|
6788b9b0b7a2d97ad274650bae05a0610728d0c2
| 14,569
|
py
|
Python
|
kubernetes/test/test_infrastructure_cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_infrastructure_cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_infrastructure_cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.infrastructure_cluster_xk8s_io_v1alpha4_api import InfrastructureClusterXK8sIoV1alpha4Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestInfrastructureClusterXK8sIoV1alpha4Api(unittest.TestCase):
"""InfrastructureClusterXK8sIoV1alpha4Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.infrastructure_cluster_xk8s_io_v1alpha4_api.InfrastructureClusterXK8sIoV1alpha4Api() # noqa: E501
def tearDown(self):
pass
def test_create_aws_cluster_controller_identity(self):
"""Test case for create_aws_cluster_controller_identity
"""
pass
def test_create_aws_cluster_role_identity(self):
"""Test case for create_aws_cluster_role_identity
"""
pass
def test_create_aws_cluster_static_identity(self):
"""Test case for create_aws_cluster_static_identity
"""
pass
def test_create_namespaced_aws_cluster(self):
"""Test case for create_namespaced_aws_cluster
"""
pass
def test_create_namespaced_aws_cluster_template(self):
"""Test case for create_namespaced_aws_cluster_template
"""
pass
def test_create_namespaced_aws_fargate_profile(self):
"""Test case for create_namespaced_aws_fargate_profile
"""
pass
def test_create_namespaced_aws_machine(self):
"""Test case for create_namespaced_aws_machine
"""
pass
def test_create_namespaced_aws_machine_pool(self):
"""Test case for create_namespaced_aws_machine_pool
"""
pass
def test_create_namespaced_aws_machine_template(self):
"""Test case for create_namespaced_aws_machine_template
"""
pass
def test_create_namespaced_aws_managed_machine_pool(self):
"""Test case for create_namespaced_aws_managed_machine_pool
"""
pass
def test_delete_aws_cluster_controller_identity(self):
"""Test case for delete_aws_cluster_controller_identity
"""
pass
def test_delete_aws_cluster_role_identity(self):
"""Test case for delete_aws_cluster_role_identity
"""
pass
def test_delete_aws_cluster_static_identity(self):
"""Test case for delete_aws_cluster_static_identity
"""
pass
def test_delete_collection_aws_cluster_controller_identity(self):
"""Test case for delete_collection_aws_cluster_controller_identity
"""
pass
def test_delete_collection_aws_cluster_role_identity(self):
"""Test case for delete_collection_aws_cluster_role_identity
"""
pass
def test_delete_collection_aws_cluster_static_identity(self):
"""Test case for delete_collection_aws_cluster_static_identity
"""
pass
def test_delete_collection_namespaced_aws_cluster(self):
"""Test case for delete_collection_namespaced_aws_cluster
"""
pass
def test_delete_collection_namespaced_aws_cluster_template(self):
"""Test case for delete_collection_namespaced_aws_cluster_template
"""
pass
def test_delete_collection_namespaced_aws_fargate_profile(self):
"""Test case for delete_collection_namespaced_aws_fargate_profile
"""
pass
def test_delete_collection_namespaced_aws_machine(self):
"""Test case for delete_collection_namespaced_aws_machine
"""
pass
def test_delete_collection_namespaced_aws_machine_pool(self):
"""Test case for delete_collection_namespaced_aws_machine_pool
"""
pass
def test_delete_collection_namespaced_aws_machine_template(self):
"""Test case for delete_collection_namespaced_aws_machine_template
"""
pass
def test_delete_collection_namespaced_aws_managed_machine_pool(self):
"""Test case for delete_collection_namespaced_aws_managed_machine_pool
"""
pass
def test_delete_namespaced_aws_cluster(self):
"""Test case for delete_namespaced_aws_cluster
"""
pass
def test_delete_namespaced_aws_cluster_template(self):
"""Test case for delete_namespaced_aws_cluster_template
"""
pass
def test_delete_namespaced_aws_fargate_profile(self):
"""Test case for delete_namespaced_aws_fargate_profile
"""
pass
def test_delete_namespaced_aws_machine(self):
"""Test case for delete_namespaced_aws_machine
"""
pass
def test_delete_namespaced_aws_machine_pool(self):
"""Test case for delete_namespaced_aws_machine_pool
"""
pass
def test_delete_namespaced_aws_machine_template(self):
"""Test case for delete_namespaced_aws_machine_template
"""
pass
def test_delete_namespaced_aws_managed_machine_pool(self):
"""Test case for delete_namespaced_aws_managed_machine_pool
"""
pass
def test_list_aws_cluster_controller_identity(self):
"""Test case for list_aws_cluster_controller_identity
"""
pass
def test_list_aws_cluster_for_all_namespaces(self):
"""Test case for list_aws_cluster_for_all_namespaces
"""
pass
def test_list_aws_cluster_role_identity(self):
"""Test case for list_aws_cluster_role_identity
"""
pass
def test_list_aws_cluster_static_identity(self):
"""Test case for list_aws_cluster_static_identity
"""
pass
def test_list_aws_cluster_template_for_all_namespaces(self):
"""Test case for list_aws_cluster_template_for_all_namespaces
"""
pass
def test_list_aws_fargate_profile_for_all_namespaces(self):
"""Test case for list_aws_fargate_profile_for_all_namespaces
"""
pass
def test_list_aws_machine_for_all_namespaces(self):
"""Test case for list_aws_machine_for_all_namespaces
"""
pass
def test_list_aws_machine_pool_for_all_namespaces(self):
"""Test case for list_aws_machine_pool_for_all_namespaces
"""
pass
def test_list_aws_machine_template_for_all_namespaces(self):
"""Test case for list_aws_machine_template_for_all_namespaces
"""
pass
def test_list_aws_managed_machine_pool_for_all_namespaces(self):
"""Test case for list_aws_managed_machine_pool_for_all_namespaces
"""
pass
def test_list_namespaced_aws_cluster(self):
"""Test case for list_namespaced_aws_cluster
"""
pass
def test_list_namespaced_aws_cluster_template(self):
"""Test case for list_namespaced_aws_cluster_template
"""
pass
def test_list_namespaced_aws_fargate_profile(self):
"""Test case for list_namespaced_aws_fargate_profile
"""
pass
def test_list_namespaced_aws_machine(self):
"""Test case for list_namespaced_aws_machine
"""
pass
def test_list_namespaced_aws_machine_pool(self):
"""Test case for list_namespaced_aws_machine_pool
"""
pass
def test_list_namespaced_aws_machine_template(self):
"""Test case for list_namespaced_aws_machine_template
"""
pass
def test_list_namespaced_aws_managed_machine_pool(self):
"""Test case for list_namespaced_aws_managed_machine_pool
"""
pass
def test_patch_aws_cluster_controller_identity(self):
"""Test case for patch_aws_cluster_controller_identity
"""
pass
def test_patch_aws_cluster_role_identity(self):
"""Test case for patch_aws_cluster_role_identity
"""
pass
def test_patch_aws_cluster_static_identity(self):
"""Test case for patch_aws_cluster_static_identity
"""
pass
def test_patch_namespaced_aws_cluster(self):
"""Test case for patch_namespaced_aws_cluster
"""
pass
def test_patch_namespaced_aws_cluster_status(self):
"""Test case for patch_namespaced_aws_cluster_status
"""
pass
def test_patch_namespaced_aws_cluster_template(self):
"""Test case for patch_namespaced_aws_cluster_template
"""
pass
def test_patch_namespaced_aws_fargate_profile(self):
"""Test case for patch_namespaced_aws_fargate_profile
"""
pass
def test_patch_namespaced_aws_fargate_profile_status(self):
"""Test case for patch_namespaced_aws_fargate_profile_status
"""
pass
def test_patch_namespaced_aws_machine(self):
"""Test case for patch_namespaced_aws_machine
"""
pass
def test_patch_namespaced_aws_machine_pool(self):
"""Test case for patch_namespaced_aws_machine_pool
"""
pass
def test_patch_namespaced_aws_machine_pool_status(self):
"""Test case for patch_namespaced_aws_machine_pool_status
"""
pass
def test_patch_namespaced_aws_machine_status(self):
"""Test case for patch_namespaced_aws_machine_status
"""
pass
def test_patch_namespaced_aws_machine_template(self):
"""Test case for patch_namespaced_aws_machine_template
"""
pass
def test_patch_namespaced_aws_managed_machine_pool(self):
"""Test case for patch_namespaced_aws_managed_machine_pool
"""
pass
def test_patch_namespaced_aws_managed_machine_pool_status(self):
"""Test case for patch_namespaced_aws_managed_machine_pool_status
"""
pass
def test_read_aws_cluster_controller_identity(self):
"""Test case for read_aws_cluster_controller_identity
"""
pass
def test_read_aws_cluster_role_identity(self):
"""Test case for read_aws_cluster_role_identity
"""
pass
def test_read_aws_cluster_static_identity(self):
"""Test case for read_aws_cluster_static_identity
"""
pass
def test_read_namespaced_aws_cluster(self):
"""Test case for read_namespaced_aws_cluster
"""
pass
def test_read_namespaced_aws_cluster_status(self):
"""Test case for read_namespaced_aws_cluster_status
"""
pass
def test_read_namespaced_aws_cluster_template(self):
"""Test case for read_namespaced_aws_cluster_template
"""
pass
def test_read_namespaced_aws_fargate_profile(self):
"""Test case for read_namespaced_aws_fargate_profile
"""
pass
def test_read_namespaced_aws_fargate_profile_status(self):
"""Test case for read_namespaced_aws_fargate_profile_status
"""
pass
def test_read_namespaced_aws_machine(self):
"""Test case for read_namespaced_aws_machine
"""
pass
def test_read_namespaced_aws_machine_pool(self):
"""Test case for read_namespaced_aws_machine_pool
"""
pass
def test_read_namespaced_aws_machine_pool_status(self):
"""Test case for read_namespaced_aws_machine_pool_status
"""
pass
def test_read_namespaced_aws_machine_status(self):
"""Test case for read_namespaced_aws_machine_status
"""
pass
def test_read_namespaced_aws_machine_template(self):
"""Test case for read_namespaced_aws_machine_template
"""
pass
def test_read_namespaced_aws_managed_machine_pool(self):
"""Test case for read_namespaced_aws_managed_machine_pool
"""
pass
def test_read_namespaced_aws_managed_machine_pool_status(self):
"""Test case for read_namespaced_aws_managed_machine_pool_status
"""
pass
def test_replace_aws_cluster_controller_identity(self):
"""Test case for replace_aws_cluster_controller_identity
"""
pass
def test_replace_aws_cluster_role_identity(self):
"""Test case for replace_aws_cluster_role_identity
"""
pass
def test_replace_aws_cluster_static_identity(self):
"""Test case for replace_aws_cluster_static_identity
"""
pass
def test_replace_namespaced_aws_cluster(self):
"""Test case for replace_namespaced_aws_cluster
"""
pass
def test_replace_namespaced_aws_cluster_status(self):
"""Test case for replace_namespaced_aws_cluster_status
"""
pass
def test_replace_namespaced_aws_cluster_template(self):
"""Test case for replace_namespaced_aws_cluster_template
"""
pass
def test_replace_namespaced_aws_fargate_profile(self):
"""Test case for replace_namespaced_aws_fargate_profile
"""
pass
def test_replace_namespaced_aws_fargate_profile_status(self):
"""Test case for replace_namespaced_aws_fargate_profile_status
"""
pass
def test_replace_namespaced_aws_machine(self):
"""Test case for replace_namespaced_aws_machine
"""
pass
def test_replace_namespaced_aws_machine_pool(self):
"""Test case for replace_namespaced_aws_machine_pool
"""
pass
def test_replace_namespaced_aws_machine_pool_status(self):
"""Test case for replace_namespaced_aws_machine_pool_status
"""
pass
def test_replace_namespaced_aws_machine_status(self):
"""Test case for replace_namespaced_aws_machine_status
"""
pass
def test_replace_namespaced_aws_machine_template(self):
"""Test case for replace_namespaced_aws_machine_template
"""
pass
def test_replace_namespaced_aws_managed_machine_pool(self):
"""Test case for replace_namespaced_aws_managed_machine_pool
"""
pass
def test_replace_namespaced_aws_managed_machine_pool_status(self):
"""Test case for replace_namespaced_aws_managed_machine_pool_status
"""
pass
if __name__ == '__main__':
unittest.main()
| 24.861775
| 139
| 0.689272
|
58a43d7f61e046ec644507c3f9d06ebbaa574b13
| 8,964
|
py
|
Python
|
PwdGen.py
|
KrVignesh/PasswordGenerator
|
f6bfdff96f84a1007e1598bc0e2727e3b1d2aab1
|
[
"MIT"
] | null | null | null |
PwdGen.py
|
KrVignesh/PasswordGenerator
|
f6bfdff96f84a1007e1598bc0e2727e3b1d2aab1
|
[
"MIT"
] | null | null | null |
PwdGen.py
|
KrVignesh/PasswordGenerator
|
f6bfdff96f84a1007e1598bc0e2727e3b1d2aab1
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import messagebox
from pathlib import Path
import random
import webbrowser
# -----FUNCTIONS
def disptotal():
global charCount
global gp
global character
global number
global symbol
gp = ""
total = charCount.get()
lett = ("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
character = charVar.get()
number = digVar.get()
symbol = symVar.get()
#To check if entered value is integer
try:
total = int(total)
except:
pass
if not isinstance(total, int):
messagebox.showerror(title = "ERROR", message = "Enter an integer Value")
elif total == 0:
messagebox.showwarning(title = "Zero Not Zero", message = "All available infinite integers, yet you chose '0' \nNOTE : minimum recommended character is 10")
elif (character & number & symbol == 1):
for i in range(0,int(total)):
gp += random.choice(lett)
output.insert(END, gp)
elif (character & number == 1) & (symbol == 0):
for i in range(0,int(total)):
gp += random.choice(lett[0:62])
output.insert(END, gp)
elif (character & symbol == 1) & (number == 0):
for i in range(0,int(total)):
gp += random.choice(lett[10:94])
output.insert(END, gp)
elif (character == 1) & (number & symbol == 0):
for i in range(0,int(total)):
gp += random.choice(lett[10:62])
output.insert(END, gp)
elif (character == 0) & (number & symbol == 1):
for i in range(0,int(total)):
gp += random.choice(lett[0:10] + lett[62:94])
output.insert(END, gp)
elif (character == 0) & (number == 1) & (symbol == 0):
for i in range(0,int(total)):
gp += random.choice(lett[0:10])
output.insert(END, gp)
elif (character & number == 0) & (symbol == 1):
for i in range(0,int(total)):
gp += random.choice(lett[62:94])
output.insert(END, gp)
else:
messagebox.showerror("Error", "select at least one checkbox")
# -----copy
def cpy():
window.clipboard_clear()
window.clipboard_append(gp)
window.update()
messagebox.showinfo(title = "Message", message = "Generated password is copied to the clipboard.\n\nNOTE : For security reason copied password will be removed after closing this app")
# -----clear
def clr():
output.delete(1.0, END)
charCount.delete(0, END)
charVar.set(0)
digVar.set(0)
symVar.set(0)
# -----save
def save():
global saveEntry
global savewin
savewin = Toplevel()
savewin.config(background = bColor)
saveLabel = Label(savewin, text = "Enter a unique name to identify this password", font=bodyFont, fg = fColor, bg = bColor)
saveLabel.pack()
saveEntry = Entry(savewin, bg = bColor, relief = "sunken")
saveEntry.pack()
saveSubmit = Button(savewin, text = "Save", cursor = "hand2", font = optionFont, command = write, relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
saveSubmit.pack()
def write():
uDir = str(Path.home())
uName = saveEntry.get()
file = open(uDir + "/Documents/password.txt", "a")
file.write(str(uName) + " : ")
file.write(str(gp))
file.write("\n-----End-----\n")
file.close()
messagebox.showinfo(title = "Success", message = "Password saved successfully in \n /user/Documents/password.txt")
savewin.destroy()
# -----links
def ghlink():
webbrowser.open(r"https://github.com/KrVignesh")
def twlink():
webbrowser.open(r"https://twitter.com/KrVigneshVictor")
def maillink():
webbrowser.open("mailto:krvigneshchn17@gmail.com")
# -----about
def about():
abtwin = Toplevel()
abtwin.config(background = bColor)
abtwin.title("ABOUT - Password Generator")
abtLabel = Label(abtwin, text = "This app Password Generator is created using Python.\n For more details, issues, suggestions \nvisit :", font = ("Courier 10 Pitch","12"), fg = fColor, bg = bColor)
abtLabel.pack()
abtgit = Button(abtwin, text = "Github", command = ghlink, fg = "white", bg = "black", cursor = "hand2", relief = "solid", font = optionFont, highlightthickness = 1, highlightbackground = "white")
abtgit.pack(side = LEFT)
abttwitter = Button(abtwin, text = "Twitter", command = twlink, fg = "white", bg = "#1DA1F2", cursor = "hand2", relief = "solid", font = optionFont, highlightthickness = 1, highlightbackground = "white")
abttwitter.pack(side = LEFT, padx = 20)
abtmail = Button(abtwin, text = "Email", command = maillink, fg = "white", bg = "#D54B3D", cursor = "hand2", relief = "solid", font = optionFont, highlightthickness = 1, highlightbackground = "white")
abtmail.pack(side = LEFT)
# -----help
def hlp():
hlpwin = Toplevel()
hlpwin.config(background = bColor)
hlpwin.title("HELP - Password Generator")
hlpLabel = Label(hlpwin, text = "Random English Letters includes : a-z, A-Z \n\nDigits includes : 0-9 \n\nSymbols include : !\"#$%&'()*+,-./:;<=>?@[\\]^_`|{}~ \n ", font = bodyFont, fg = fColor, bg = bColor)
hlpLabel.pack()
# -----Color & fonts
bColor = "#ae00ff"
fColor = "white"
bodyFont = ("Courier 10 Pitch","14", "bold")
optionFont = ("Courier 10 Pitch", "12", "italic")
# -----GUI
# -----create main window
window = Tk()
window.config(background = bColor)
window.title("Password Generator")
# -----heading
title = Label(window, text = "PASSWORD GENERATOR", anchor = "center", font=("Free Mono","30", "bold"), fg = fColor, bg = bColor)
title.grid(column = 0, row = 0, columnspan = 4, pady = 20)
# -----get number of character in password
charLabel = Label(window, text = "How many character do you need in Password\n(Enter integer numbers)", font=bodyFont, fg = fColor, bg = bColor)
charLabel.grid(column = 0, row = 1, pady = 10, padx = 20, sticky = E)
charCount = Entry(window, relief = "sunken", justify = "center", borderwidth = 4, bg = bColor)
charCount.grid(column = 1, row = 1)
# -----things to be included in password
incLabel = Label(window, text = "Password should include", font=bodyFont, fg = fColor, bg = bColor)
incLabel.grid(column = 0, row = 2, pady = 10, sticky = E)
charVar = BooleanVar()
incChar = Checkbutton(window, text = "Random English Letters", variable = charVar, font = optionFont, cursor = "hand2", bg = bColor, fg = fColor, highlightthickness = 0, selectcolor = bColor, activebackground = bColor)
incChar.grid(column = 1, row = 2)
digVar = BooleanVar()
incDigits = Checkbutton(window, text = "Digits", variable = digVar, font = optionFont, cursor = "hand2", bg = bColor, fg = fColor, highlightthickness = 0, selectcolor = bColor, activebackground = bColor)
incDigits.grid(column = 2, row = 2)
symVar = BooleanVar()
incSym = Checkbutton(window, text = "Symbols", variable = symVar, font = optionFont, cursor = "hand2", bg = bColor, fg = fColor, highlightthickness = 0, selectcolor = bColor, activebackground = bColor)
incSym.grid(column = 3, row = 2)
# -----Generate - button
Submit = Button(window, text = "Generate", cursor = "hand2", font = optionFont, command = disptotal, relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
Submit.grid(column = 1, row = 4, pady = 10)
# -----clear all - button
clear = Button(window, text = "Clear All",command = clr, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
clear.grid(column = 2, row = 4, pady = 10)
#-----generated password
pwdLabel = Label(window, text = "Generated Password", font= bodyFont, fg = fColor, bg = bColor)
pwdLabel.grid(column = 0, row = 5, pady = 10, sticky = E)
# -----output
output = Text(window, width = 60, height = 5, borderwidth = 4, relief = "ridge", bg = bColor)
output.grid(column = 1, row = 5, columnspan = 3, pady = 20, padx = 10)
# -----Copy generated password to clipboard - button
copy = Button(window, text = "Copy",command = cpy, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
copy.grid(column = 1, row = 6, pady = 10, columnspan = 1)
# -----save generated password to a file - button
store = Button(window, text = "Save",command = save, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
store.grid(column = 2, row = 6, pady = 10, columnspan = 1)
# -----about - button
abt = Button(window, text = "About",command = about, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
abt.grid(column = 2, row = 7, pady = 5)
# -----help - button
more = Button(window, text = "Help",command = hlp, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
more.grid(column = 3, row = 7)
# -----quit - button
exit = Button(window, text = "Quit",command = window.destroy, font = optionFont, cursor = "hand2", relief = "solid", fg = fColor, bg = bColor, highlightthickness = 1, highlightbackground = "white")
exit.grid(column = 4, row = 7, padx = 5)
window.mainloop()
| 41.88785
| 218
| 0.671575
|
61dae00b1191af31aa31bf7dd39913fb1d556cdd
| 1,363
|
py
|
Python
|
Lib/test/test_osx_env.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/test/test_osx_env.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_osx_env.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
"""
Test suite dla OS X interpreter environment variables.
"""
z test.support zaimportuj EnvironmentVarGuard
zaimportuj subprocess
zaimportuj sys
zaimportuj sysconfig
zaimportuj unittest
@unittest.skipUnless(sys.platform == 'darwin' oraz
sysconfig.get_config_var('WITH_NEXT_FRAMEWORK'),
'unnecessary on this platform')
klasa OSXEnvironmentVariableTestCase(unittest.TestCase):
def _check_sys(self, ev, cond, sv, val = sys.executable + 'dummy'):
przy EnvironmentVarGuard() jako evg:
subpc = [str(sys.executable), '-c',
'zaimportuj sys; sys.exit(2 jeżeli "%s" %s %s inaczej 3)' % (val, cond, sv)]
# ensure environment variable does nie exist
evg.unset(ev)
# test that test on sys.xxx normally fails
rc = subprocess.call(subpc)
self.assertEqual(rc, 3, "expected %s nie %s %s" % (ev, cond, sv))
# set environ variable
evg.set(ev, val)
# test that sys.xxx has been influenced by the environ value
rc = subprocess.call(subpc)
self.assertEqual(rc, 2, "expected %s %s %s" % (ev, cond, sv))
def test_pythonexecutable_sets_sys_executable(self):
self._check_sys('PYTHONEXECUTABLE', '==', 'sys.executable')
jeżeli __name__ == "__main__":
unittest.main()
| 38.942857
| 92
| 0.630961
|
9fac272ee6df9bc6681c02b6a0fa34ecb5889f55
| 4,358
|
py
|
Python
|
django/bossingest/test/int_test_ingest_manager.py
|
ArnaudGallardo/boss
|
c0d3bbca31575ac5442822b8d7f962def32d9072
|
[
"Apache-2.0"
] | 20
|
2016-05-16T21:08:13.000Z
|
2021-11-16T11:50:19.000Z
|
django/bossingest/test/int_test_ingest_manager.py
|
ArnaudGallardo/boss
|
c0d3bbca31575ac5442822b8d7f962def32d9072
|
[
"Apache-2.0"
] | 31
|
2016-10-28T17:51:11.000Z
|
2022-02-10T08:07:31.000Z
|
django/bossingest/test/int_test_ingest_manager.py
|
ArnaudGallardo/boss
|
c0d3bbca31575ac5442822b8d7f962def32d9072
|
[
"Apache-2.0"
] | 12
|
2016-10-28T17:47:01.000Z
|
2021-05-18T23:47:06.000Z
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import unittest
import json
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from bossingest.ingest_manager import IngestManager
from bossingest.test.setup import SetupTests
from bosscore.test.setup_db import SetupTestDB
from ndingest.ndqueue.ndqueue import NDQueue
from ndingest.ndqueue.uploadqueue import UploadQueue
from ndingest.ndqueue.ingestqueue import IngestQueue
from ndingest.ndingestproj.bossingestproj import BossIngestProj
class BossIntegrationIngestManagerTestMixin(object):
def test_validate_ingest(self):
"""Method to test validation method"""
# Validate schema and config file
ingest_mgmr = IngestManager()
response = ingest_mgmr.validate_config_file(self.example_config_data)
assert (response is True)
# Validate properties
response = ingest_mgmr.validate_properties()
assert (response is True)
def test_validate_config_file(self):
"""Method to test validation of a config file"""
ingest_mgmr = IngestManager()
ingest_mgmr.validate_config_file(self.example_config_data)
assert(ingest_mgmr.config is not None)
assert (ingest_mgmr.config.config_data is not None)
def test_validate_properties(self):
"""Methods to test validation of properties of the config data"""
ingest_mgmr = IngestManager()
ingest_mgmr.validate_config_file(self.example_config_data)
ingest_mgmr.validate_properties()
assert (ingest_mgmr.collection.name == 'my_col_1')
assert (ingest_mgmr.experiment.name == 'my_exp_1')
assert (ingest_mgmr.channel.name == 'my_ch_1')
def test_create_ingest_job(self):
"""Method to test creation o a ingest job from a config_data dict"""
ingest_mgmr = IngestManager()
ingest_mgmr.validate_config_file(self.example_config_data)
ingest_mgmr.validate_properties()
ingest_mgmr.owner = self.user.id
job = ingest_mgmr.create_ingest_job()
assert (job.id is not None)
def test_setup_ingest(self):
"""Method to test the setup_ingest method"""
try:
ingest_mgmr = IngestManager()
ingest_job = ingest_mgmr.setup_ingest(self.user.id, self.example_config_data)
assert (ingest_job is not None)
# Check if the queue's exist
proj_class = BossIngestProj.load()
nd_proj = proj_class(ingest_job.collection, ingest_job.experiment, ingest_job.channel,
ingest_job.resolution, ingest_job.id)
ingest_mgmr.nd_proj = nd_proj
upload_queue = UploadQueue(nd_proj, endpoint_url=None)
assert(upload_queue is not None)
ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
assert (ingest_queue is not None)
ingest_mgmr.remove_ingest_credentials(ingest_job.id)
except:
raise
finally:
ingest_mgmr.delete_upload_queue()
ingest_mgmr.delete_ingest_queue()
class TestIntegrationBossIngestManager(BossIntegrationIngestManagerTestMixin, APITestCase):
def setUp(self):
# Randomize queue names.
NDQueue.test_mode = True
# Get the config_data
self.user = User.objects.create_superuser(username='testuser1', email='test@test.com', password='testuser')
config_data = SetupTests().get_ingest_config_data_dict()
self.example_config_data = config_data
dbsetup = SetupTestDB()
dbsetup.set_user(self.user)
dbsetup.insert_ingest_test_data()
def tearDown(self):
NDQueue.test_mode = False
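# One way to run these integration tests with the standard Django runner
# (illustrative only; the project may wrap this in its own tooling):
#   python manage.py test bossingest.test.int_test_ingest_manager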
| 38.22807
| 116
| 0.70927
|
cd4badd17a3a9b8f6d77ddf5c71744c1d16eda63
| 1,080
|
py
|
Python
|
athemesasl.py
|
PonyChat/docker-hosted-znc
|
a8e0e429fe5b4ce148914d748a3d1e394cc8c985
|
[
"CC0-1.0"
] | 3
|
2016-06-30T04:15:25.000Z
|
2016-07-30T19:00:53.000Z
|
athemesasl.py
|
PonyChat/docker-hosted-znc
|
a8e0e429fe5b4ce148914d748a3d1e394cc8c985
|
[
"CC0-1.0"
] | null | null | null |
athemesasl.py
|
PonyChat/docker-hosted-znc
|
a8e0e429fe5b4ce148914d748a3d1e394cc8c985
|
[
"CC0-1.0"
] | null | null | null |
import base64
import os
import znc
class athemesasl(znc.Module):
module_types = [znc.CModInfo.UserModule]
def __init__(self):
self.description = "Atheme SASL"
def OnServerCapAvailable(self, scap):
self.cookie = self.getCookie()
self.username = self.GetUser().GetUserName()
return scap == "sasl"
def OnServerCapResult(self, scap, success):
if scap == "sasl":
if success:
self.PutIRC("AUTHENTICATE AUTHCOOKIE")
self.PutIRC("AUTHENTICATE " +
self.makeSaslAuthString(self.username, self.cookie))
self.PutUser(":bnc.{} NOTICE * :*** Authenticated over Atheme XMLRPC".format(os.getenv("IRC_NETWORK_DOMAIN")))
def makeSaslAuthString(self, username, cookie):
return (base64.b64encode(bytes("%s\0%s\0%s" %
(username, username, cookie), "utf-8"))).decode("utf-8")
def getCookie(self):
with open("/tmp/znc-cookie-%s" %
self.GetUser().GetUserName(), "r") as fin:
return fin.readline()
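# Sketch of the payload shape makeSaslAuthString produces: the AUTHCOOKIE
# mechanism sends base64("user\0user\0cookie"). The username and cookie
# values below are illustrative only.
# >>> import base64
# >>> base64.b64encode(b"alice\0alice\0secret").decode("utf-8")
# 'YWxpY2UAYWxpY2UAc2VjcmV0'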
| 32.727273
| 122
| 0.600926
|
198c30a663d3e236957b0b2f548d7e50f41bffc4
| 2,581
|
py
|
Python
|
python/convergdb/locking.py
|
ray-pan-bci/convergdb
|
d12730d801200a0a084038dc214c23c2d8adf69b
|
[
"MIT"
] | 10
|
2018-04-19T16:09:11.000Z
|
2020-04-15T03:43:28.000Z
|
python/convergdb/locking.py
|
ray-pan-bci/convergdb
|
d12730d801200a0a084038dc214c23c2d8adf69b
|
[
"MIT"
] | 13
|
2020-04-09T13:56:38.000Z
|
2020-04-15T03:27:02.000Z
|
python/convergdb/locking.py
|
ray-pan-bci/convergdb
|
d12730d801200a0a084038dc214c23c2d8adf69b
|
[
"MIT"
] | 4
|
2018-08-31T09:15:27.000Z
|
2020-04-28T01:24:17.000Z
|
import boto3
import os
import time
import uuid
from convergdb_logging import *
from functools import wraps
def dynamodb_client():
if 'AWS_GLUE_REGION' in os.environ:
return boto3.client('dynamodb', region_name=os.environ['AWS_GLUE_REGION'])
else:
return boto3.client('dynamodb')
lock_table = os.environ['LOCK_TABLE']
lock_id = os.environ['LOCK_ID']
def acquire_lock(owner_id):
put_params = {
'TableName': lock_table,
'Item': {
'LockID': {
'S': lock_id
},
'OwnerID': {
'S': owner_id
}
},
'ConditionExpression': 'attribute_not_exists(LockID)'
}
# Will raise an exception if the item already exists.
# Otherwise, catches 'AccessDeniedException' and retry
convergdb_log("Attempting conditional put: lock_id: [" + lock_id + "], owner_id: [" + owner_id + "]")
dynamodb_client().put_item(**put_params)
convergdb_log("Lock acquired: [" + lock_id + "]")
def release_lock(owner_id):
delete_params = {
'TableName': lock_table,
'ConditionExpression': 'OwnerID = :OwnerID',
'ExpressionAttributeValues': {
':OwnerID': {
'S': owner_id
}
},
'Key': {
'LockID': {
'S': lock_id
}
}
}
# No exceptions raised if condition is not met.
convergdb_log("Attempting conditional delete: lock_id: [" + lock_id + "], owner_id: [" + owner_id + "]")
dynamodb_client().delete_item(**delete_params)
convergdb_log("Lock released: [" + lock_id + "]")
def lock(function):
@wraps(function)
def wrapper(*args, **kwargs):
# Validate environment variables exist
assert lock_table != None
assert lock_id != None
owner_id = str(uuid.uuid4())
response = {}
err = None
try:
acquire_lock(owner_id)
# LOCKED at this point. Single execution in progress here.
# - Business logic should be resilient to partial or subsequent
# execution (but not concurrent execution).
response = function(*args, **kwargs)
except Exception as e:
# catch the exception
err = e
finally:
# release the lock
# if this fails.. it will raise an exception
release_lock(owner_id)
# raise previous exception
if err:
raise err
return response
return wrapper
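# Minimal usage sketch for the decorator above: any callable wrapped with
# @lock runs only while the conditional-put lock row is held. The function
# below is illustrative only and is not invoked anywhere in this module.
@lock
def example_locked_task():
    convergdb_log("running under the distributed lock")
    return {'status': 'ok'}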
| 27.752688
| 109
| 0.567222
|
d5c84effc8f324ca8040a7a285b73f25aa607fbf
| 696
|
py
|
Python
|
testing/cache/property_pickled.py
|
xapple/plumbing
|
47a0a85c5367a01385e8ff30ed658bc34cf0ce2a
|
[
"MIT"
] | null | null | null |
testing/cache/property_pickled.py
|
xapple/plumbing
|
47a0a85c5367a01385e8ff30ed658bc34cf0ce2a
|
[
"MIT"
] | null | null | null |
testing/cache/property_pickled.py
|
xapple/plumbing
|
47a0a85c5367a01385e8ff30ed658bc34cf0ce2a
|
[
"MIT"
] | 1
|
2015-08-26T08:53:19.000Z
|
2015-08-26T08:53:19.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to test the functionality of the property_pickled decorator.
Written by Lucas Sinclair. MIT Licensed.
Call it like this:
$ ipython3 -i ~/repos/plumbing/test/cache/property_pickled.py
"""
# Internal modules #
from plumbing.cache import property_pickled
###############################################################################
class Square:
def __init__(self, size):
self.size = size
@property_pickled
def area(self):
print("Evaluating...")
return self.size * self.size
###############################################################################
shape = Square(5)
print(shape.area)
| 24
| 79
| 0.525862
|
43e4392078e272cb35a45f16f67f418930d593c0
| 4,918
|
py
|
Python
|
src/models/nn/gate.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 513
|
2021-11-03T23:08:23.000Z
|
2022-03-31T16:29:18.000Z
|
src/models/nn/gate.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 18
|
2021-11-05T12:42:59.000Z
|
2022-03-27T19:49:55.000Z
|
src/models/nn/gate.py
|
MikeOwino/state-spaces
|
b6672bca994b6a36347f414faa59761e42b1e2b1
|
[
"Apache-2.0"
] | 47
|
2021-11-04T01:32:54.000Z
|
2022-03-30T18:24:26.000Z
|
""" Defines flexible gating mechanisms based on ideas from LSSL paper and UR-LSTM paper https://arxiv.org/abs/1910.09890 """
import torch
import torch.nn as nn
class Gate(nn.Module):
""" Implements gating mechanisms. TODO update this with more detailed description with reference to LSSL paper when it's on arxiv
Mechanisms:
N - No gate
G - Standard sigmoid gate
UR - Uniform refine gates
R - Refine gate
FS - Forward discretization, Sigmoid activation [equivalent to G]
BE - Backward discretization, Exp activation [equivalent to G]
BR - Backward discretization, Relu activation
TE - Trapezoid discretization, Exp activation
TR - Trapezoid discretization, Relu activation
TS - Trapezoid discretization, Sigmoid activation (0 to 2)
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism in ['G', 'FS', 'BE', 'BR', 'TE', 'TR', 'TS', 'ZE', 'ZR', 'ZS']:
self.W_g = preact_ctor(*preact_args)
elif self.mechanism in ['U', 'UT']:
self.W_g = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'UR':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'R':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
elif self.mechanism in ['GT']:
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
if self.mechanism == 'U':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
elif self.mechanism == 'UR':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'R':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'UT':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'GT':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
else:
g_preact = self.W_g(*inputs)
# if self.mechanism[1] == 'S':
# g = torch.sigmoid(g_preact)
# elif self.mechanism[1] == 'E':
# g = torch.exp(g_preact)
# elif self.mechanism[1] == 'R':
# g = torch.relu(g_preact)
if self.mechanism == 'FS':
g = torch.sigmoid(g_preact)
g = self.forward_diff(g)
elif self.mechanism == 'BE':
g = torch.exp(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'BR':
g = torch.relu(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'TS':
g = 2 * torch.sigmoid(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TE':
g = torch.exp(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TR':
g = torch.relu(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'ZE':
g = torch.exp(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZR':
g = torch.relu(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZS':
g = torch.sigmoid(g_preact)
g = self.zoh(g)
return g
def forward_diff(self, x):
return x
def backward_diff(self, x):
return x / (1+x)
# return 1 / (1+1/x)
def trapezoid(self, x):
return x / (1 + x/2)
# return 1 / (.5 + 1/x)
def zoh(self, x):
return 1 - torch.exp(-x)
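# Minimal construction sketch, assuming the torch import above is available.
# preact_ctor(*preact_args) builds the gate's pre-activation map; here it is
# an nn.Linear from 16 input features to 8 gate units, and the sizes are
# illustrative only.
if __name__ == '__main__':
    d_input, d_gate = 16, 8
    gate = Gate(d_gate, nn.Linear, (d_input, d_gate), mechanism='UR')
    x = torch.randn(4, d_input)   # batch of 4 example inputs
    g = gate(x)                   # refine-gated values in (0, 1), shape (4, 8)
    print(g.shape)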
| 37.541985
| 133
| 0.524807
|
8ede58498aa9449244e7b3fddd85f7d06e44c1a7
| 1,136
|
py
|
Python
|
heart_disease/config/__init__.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | 1
|
2021-06-20T09:08:26.000Z
|
2021-06-20T09:08:26.000Z
|
heart_disease/config/__init__.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | null | null | null |
heart_disease/config/__init__.py
|
victor-iyi/heart-disease
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
[
"MIT"
] | null | null | null |
# Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration module.
Features:
- File configuration
- Logging configuration
- Utility/Handy classes and functions for:
- Downloading and extracting files and folders.
- Loading configuration files .json, .csv, .cfg, .in, .yaml, etc...
"""
from heart_disease.config.config import Config
from heart_disease.config.consts import FS, LOGGER, SETUP
from heart_disease.config.util import Log
__all__ = [
# Configuration utils.
'Config',
# File system configurations.
'FS', 'SETUP', 'LOGGER',
# Utility files.
'Log',
]
| 29.128205
| 74
| 0.732394
|
a4aa2d57a299c48c35117da629858819aa4eb7b6
| 3,368
|
py
|
Python
|
bokeh/sphinxext/bokeh_prop.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 1
|
2015-07-17T13:57:01.000Z
|
2015-07-17T13:57:01.000Z
|
bokeh/sphinxext/bokeh_prop.py
|
evidation-health/bokeh
|
2c580d93419033b962d36e3c46d7606cc2f24606
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/sphinxext/bokeh_prop.py
|
evidation-health/bokeh
|
2c580d93419033b962d36e3c46d7606cc2f24606
|
[
"BSD-3-Clause"
] | 1
|
2021-08-01T08:38:53.000Z
|
2021-08-01T08:38:53.000Z
|
""" Thoroughly document Bokeh property attributes.
The ``bokeh-prop`` directive generates useful type information
for the property attribute, including cross links to the relevant
property types. Additionally, any per-attribute docstrings are
also displayed.
Usage
-----
This directive takes the path to an attribute on a Bokeh
model class as an argument::
.. bokeh-prop:: bokeh.sphinxext.sample.Bar.thing
Examples
--------
For the following definition of ``bokeh.sphinxext.sample.Bar``::
class Bar(PlotObject):
''' This is a Bar model. '''
thing = List(Int, help="doc for thing")
the above usage yields the output:
----
.. bokeh-prop:: bokeh.sphinxext.sample.Bar.thing
"""
from __future__ import absolute_import, print_function
import importlib
from docutils import nodes
from docutils.statemachine import ViewList
import textwrap
import jinja2
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from bokeh.plot_object import Viewable
import bokeh.properties
PROP_TEMPLATE = jinja2.Template(u"""
.. attribute:: {{ name }}
:module: {{ module }}
*property type:* {{ type_info }}
{% if doc %}{{ doc|indent(4) }}{% endif %}
""")
PROP_NAMES = [
name for name, cls in bokeh.properties.__dict__.items()
if isinstance(cls, type) and issubclass(cls, bokeh.properties.Property)
]
PROP_NAMES.sort(reverse=True, key=len)
class BokehPropDirective(Directive):
has_content = True
required_arguments = 1
def run(self):
prop_path = self.arguments[0]
module_path, model_name, prop_name = prop_path.rsplit('.', 2)
try:
module = importlib.import_module(module_path)
except ImportError:
pass
model = getattr(module, model_name, None)
if model is None:
pass
if type(model) != Viewable:
pass
model_obj = model()
prop = getattr(model_obj.__class__, prop_name)
type_info = self._get_type_info(prop)
rst_text = PROP_TEMPLATE.render(
name=prop_name,
module=module_path,
type_info=type_info,
doc="" if prop.__doc__ is None else textwrap.dedent(prop.__doc__),
)
result = ViewList()
for line in rst_text.split("\n"):
result.append(line, "<bokeh-prop>")
node = nodes.paragraph()
node.document = self.state.document
nested_parse_with_titles(self.state, result, node)
return node.children
def _get_type_info(self, prop):
desc = str(prop)
template = ":class:`~bokeh.properties.%s`\ "
# some of the property names are substrings of other property names
# so first go through greedily replacing the longest possible match
# with a unique id (PROP_NAMES is reverse sorted by length)
for i, name in enumerate(PROP_NAMES):
desc = desc.replace(name, "__ID%d" % i)
# now replace the unique id with the corresponding prop name. Go in
# reverse to make sure replacements are greedy
for i in range(len(PROP_NAMES)-1, 0, -1):
name = PROP_NAMES[i]
desc = desc.replace("__ID%d" % i, template % name)
return desc
def setup(app):
app.add_directive_to_domain('py', 'bokeh-prop', BokehPropDirective)
| 26.730159
| 78
| 0.656176
|
5539ed8e2089c11e61964850537c439b5a5bdbd2
| 1,936
|
py
|
Python
|
lib/googlecloudsdk/api_lib/storage/patch_gcs_messages.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/api_lib/storage/patch_gcs_messages.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/storage/patch_gcs_messages.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patches for pickling isssues in apitools.
The storage surface needs to be able to serialize apitools messages to support
multiprocessing; however, there are a number of bugs with pickling apitools
messages that need to be patched, pending more permanent fixes.
"""
# TODO(b/171296237): Remove this file when fixes are submitted in apitools.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.protorpclite import messages
def _time_zone_offset_init_args(self):
"""Implements apitools.base.protorpclite.util.TimeZoneOffset.__getinitargs__.
The apitools TimeZoneOffset class inherits from datetime.tzinfo, which
implements custom pickling behavior in __reduce__. This reduce method cannot
handle the additional argument that TimeZoneOffset adds to __init__, which
makes TimeZoneOffset unpicklable without implementing __getinitargs__ as
we do here.
Args:
self (TimeZoneOffset): an instance of TimeZoneOffset.
Returns:
A tuple of arguments passed to TimeZoneOffset.__init__ when unpickling.
"""
# pylint: disable=protected-access
return (self._TimeZoneOffset__offset,)
# pylint: enable=protected-access
def patch():
messages.util.TimeZoneOffset.__getinitargs__ = _time_zone_offset_init_args
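# Sketch of the intended use: call patch() before pickling apitools messages
# for worker processes. The TimeZoneOffset argument below (minutes east of
# UTC) is an assumption about the apitools constructor, for illustration only.
# import pickle
# patch()
# tz = messages.util.TimeZoneOffset(60)
# pickle.loads(pickle.dumps(tz))  # round-trips cleanly once patched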
| 36.528302
| 79
| 0.789256
|
b4ceb86771d48481b16daa755a9bf1d40b4c2b4b
| 5,442
|
py
|
Python
|
build/plugins/pyx.py
|
Kim2212/catboost
|
b9ad3b0dac6269c2638e8ee4e7bb85662677a921
|
[
"Apache-2.0"
] | null | null | null |
build/plugins/pyx.py
|
Kim2212/catboost
|
b9ad3b0dac6269c2638e8ee4e7bb85662677a921
|
[
"Apache-2.0"
] | null | null | null |
build/plugins/pyx.py
|
Kim2212/catboost
|
b9ad3b0dac6269c2638e8ee4e7bb85662677a921
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import _common
import _import_wrapper as iw
INCLUDE_PATTERN = re.compile('(include *"([^"]+)")|(include *\'([^\']+)\')')
CIMPORT_PATTERN = re.compile('cimport +([^#]+)')
FROM_CIMPORT_PATTERN = re.compile('from +(\S+) +cimport +([^#]+)')
INDUCED_PATTERN = re.compile('cdef +extern +from +["\']<?([^">]+)>?["\']')
def find_init_files(files, unit):
traversed = set()
for include in files:
path = os.path.dirname(include)
while path and path not in traversed:
traversed.add(path)
init_file = _common.join_intl_paths(path, '__init__.py')
path = os.path.dirname(path)
if os.path.isfile(unit.resolve(_common.join_intl_paths('$S', init_file))):
yield init_file
class PyxParser(object):
def __init__(self, path, unit):
self._path = path
self._includes = []
self._induced = []
retargeted = os.path.join(unit.path(), os.path.basename(path))
if path.endswith('.pyx'):
pxd = path[:-4] + '.pxd'
if os.path.exists(pxd):
self._includes.append(unit.resolve_arc_path(pxd))
with open(path, 'rb') as f:
includes, induced, susp_includes = self.parse_includes(f.readlines())
for susp_incl in susp_includes:
incl_path = unit.resolve(os.path.join(unit.path(), susp_incl[0]))
if not os.path.isdir(incl_path):
includes.append(susp_incl[0] + '.pxd')
else:
for f in susp_incl[1]:
if f != '*':
includes.append(susp_incl[0] + '/' + f + '.pxd')
if includes:
self._includes += unit.resolve_include([retargeted] + includes + list(find_init_files(includes, unit)))
if induced:
self._induced += unit.resolve_include([retargeted] + induced + list(find_init_files(induced, unit)))
@staticmethod
def get_perm_includes():
where = 'contrib/tools/cython/Cython/Utility'
includes = [
'Buffer.c',
'Builtins.c',
'CMath.c',
'Capsule.c',
'CommonTypes.c',
'Complex.c',
'Coroutine.c',
'CythonFunction.c',
'Embed.c',
'Exceptions.c',
'ExtensionTypes.c',
'FunctionArguments.c',
'ImportExport.c',
'MemoryView_C.c',
'ModuleSetupCode.c',
'ObjectHandling.c',
'Optimize.c',
'Overflow.c',
'Printing.c',
'Profile.c',
'StringTools.c',
'TestUtilityLoader.c',
'TypeConversion.c',
]
return [where + '/' + x for x in includes]
@staticmethod
def parse_includes(content, perm_includes=True, direct_includes_only=False):
if perm_includes:
includes = PyxParser.get_perm_includes()
else:
includes = []
induced = []
susp_includes = []
for line in content:
line = line.lstrip()
incl = INCLUDE_PATTERN.match(line)
if incl:
incl_value = incl.group(2) or incl.group(4)
if incl_value:
includes.append(incl_value)
elif not direct_includes_only:
ind = INDUCED_PATTERN.match(line)
if ind and ind.group(1):
induced.append(ind.group(1))
else:
def filter_known_inner_paths(p):
# XXX: cpython is here but need to be added in PEERDIRs|ADDINCLs of cython project and * supported somehow
return p and p.split('.')[0] not in ('libc', 'libcpp', 'cython', 'cpython')
cimport = CIMPORT_PATTERN.match(line)
if cimport:
cimport_files = cimport.group(1)
cimport_files = [x.strip() for x in cimport_files.split(',')]
cimport_files = [x.split(' ')[0] for x in cimport_files]
for cimport_file in cimport_files:
if filter_known_inner_paths(cimport_file):
includes.append(cimport_file.replace('.', '/') + '.pxd')
else:
from_cimport = FROM_CIMPORT_PATTERN.match(line)
if from_cimport:
cimport_source = from_cimport.group(1)
cimport_symbols = from_cimport.group(2) or ''
cimport_symbols = [x.strip() for x in cimport_symbols.split(',')]
cimport_symbols = [x.split(' ')[0] for x in cimport_symbols]
if filter_known_inner_paths(cimport_source):
susp_includes.append((cimport_source.replace('.', '/'), cimport_symbols))
return includes, induced, susp_includes
def includes(self):
return self._includes
def induced_deps(self):
return {
'cpp': ['$S/contrib/libs/python/Include/Python.h'] + self._induced
}
def init():
iw.addparser('pyx', PyxParser, pass_induced_includes=True)
iw.addparser('pxd', PyxParser, pass_induced_includes=True)
iw.addparser('pxi', PyxParser, pass_induced_includes=True)
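# Rough illustration of what parse_includes extracts from pyx/pxd source
# lines; the sample lines are made up, and perm_includes=False keeps the
# fixed Cython utility list out of the result.
# includes, induced, susp = PyxParser.parse_includes(
#     [
#         'include "common.pxi"',
#         'cdef extern from "<math.h>":',
#         'from mypkg.core cimport fast_sum',
#     ],
#     perm_includes=False,
# )
# # includes -> ['common.pxi'], induced -> ['math.h'],
# # susp -> [('mypkg/core', ['fast_sum'])]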
| 37.531034
| 130
| 0.527563
|
42fd18b2747b21c8b7ba6990e90564fc2faf30ed
| 730
|
py
|
Python
|
ap_cli/main.py
|
monty5811/apostello-docker-cli
|
80bf49897b8bb5f27c94768c142c6b3fbd6d36bf
|
[
"MIT"
] | null | null | null |
ap_cli/main.py
|
monty5811/apostello-docker-cli
|
80bf49897b8bb5f27c94768c142c6b3fbd6d36bf
|
[
"MIT"
] | null | null | null |
ap_cli/main.py
|
monty5811/apostello-docker-cli
|
80bf49897b8bb5f27c94768c142c6b3fbd6d36bf
|
[
"MIT"
] | null | null | null |
import os
import subprocess
from datetime import datetime
import click
from ap_cli import apostello
from ap_cli import build
from ap_cli import config
from ap_cli import db
from ap_cli import dev
from ap_cli import init
from ap_cli import upgrade
@click.group()
def cli():
"""Setup, configure, and deploy an instance of apostello."""
pass
cli.add_command(apostello.logs)
cli.add_command(apostello.start)
cli.add_command(apostello.stop)
cli.add_command(config.config)
cli.add_command(db.backup)
cli.add_command(db.restore)
cli.add_command(db.migrate)
cli.add_command(init.init)
cli.add_command(upgrade.upgrade)
cli.add_command(build.build)
# dev:
cli.add_command(dev.start_dev)
cli.add_command(dev.build_assets)
| 18.25
| 64
| 0.79589
|
7872ef9c750361ded12ce5059305f793d6795e34
| 2,108
|
py
|
Python
|
src/mds/api/contrib/smslib/clickatell.py
|
m-socha/sana.mds
|
4d3b71b7ba939c91570fee4f60444cf07035bd51
|
[
"BSD-3-Clause"
] | 2
|
2016-05-19T02:32:13.000Z
|
2017-09-06T07:06:25.000Z
|
src/mds/api/contrib/smslib/clickatell.py
|
m-socha/sana.mds
|
4d3b71b7ba939c91570fee4f60444cf07035bd51
|
[
"BSD-3-Clause"
] | 6
|
2015-07-19T17:40:49.000Z
|
2016-12-20T21:54:59.000Z
|
src/mds/api/contrib/smslib/clickatell.py
|
m-socha/sana.mds
|
4d3b71b7ba939c91570fee4f60444cf07035bd51
|
[
"BSD-3-Clause"
] | 14
|
2015-10-30T09:50:21.000Z
|
2019-06-15T13:07:37.000Z
|
'''
Created on Aug 11, 2012
:author: Sana Development Team
:version: 2.0
'''
try:
import json as simplejson
except ImportError, e:
import simplejson
import logging
import urllib
from django.conf import settings
def send_clickatell_notification(message_body, phoneId,formatter=None):
return ClickatellOpener().open(message_body, phoneId,formatter=formatter)
class ClickatellOpener:
def __init__(self):
pass
def open(self, n, phoneId, formatter=None):
"""Sends an SMS message to Clickatell http interface
See Clickatell API documentation for full details.
Clickatell params
user
Clickatell account user name
password
Clickatell account password
api_id
see Clickatell documentation
to
Recipient telephone number
text
Message Body
Clickatell url: http://api.clickatell.com/http/sendmsg?params
Parameters:
message_body
Message body
phoneId
Recipient
"""
result = False
try:
messages = formatter(n) if formatter else n
for message in messages:
params = urllib.urlencode({
'user': settings.CLICKATELL_USER,
'password': settings.CLICKATELL_PASSWORD,
'api_id': settings.CLICKATELL_API,
'to': phoneId,
'text': message
})
logging.info("Sending clickatell notification %s to %s" %
(message, phoneId))
response = urllib.urlopen(settings.CLICKATELL_URI % params).read()
logging.info("Clickatell response: %s" % response)
result = True
except Exception, e:
logging.error("Couldn't submit Clickatell notification for %s: %s" % (phoneId, e))
return result
| 29.277778
| 94
| 0.54222
|
8738ece7270371ecd161f339300520bfdd641234
| 1,930
|
py
|
Python
|
scons/scons-local-2.5.0/SCons/Tool/DCommon.py
|
emamanto/Soar
|
72d2bc095068dd87ac78dad4f48938f6edc0353a
|
[
"BSD-2-Clause"
] | 72
|
2020-06-12T06:33:41.000Z
|
2021-03-22T03:15:56.000Z
|
scons/scons-local-2.5.0/SCons/Tool/DCommon.py
|
emamanto/Soar
|
72d2bc095068dd87ac78dad4f48938f6edc0353a
|
[
"BSD-2-Clause"
] | 9
|
2020-07-02T09:36:49.000Z
|
2021-03-25T23:54:00.000Z
|
scons/scons-local-2.5.0/SCons/Tool/DCommon.py
|
emamanto/Soar
|
72d2bc095068dd87ac78dad4f48938f6edc0353a
|
[
"BSD-2-Clause"
] | 14
|
2020-06-12T03:08:03.000Z
|
2021-02-03T11:43:09.000Z
|
"""SCons.Tool.DCommon
Common code for the various D tools.
Coded by Russel Winder (russel@winder.org.uk)
2012-09-06
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/DCommon.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os.path
def isD(env, source):
if not source:
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext == '.d':
return 1
return 0
def addDPATHToEnv(env, executable):
dPath = env.WhereIs(executable)
if dPath:
phobosDir = dPath[:dPath.rindex(executable)] + '/../src/phobos'
if os.path.isdir(phobosDir):
env.Append(DPATH=[phobosDir])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.859649
| 106
| 0.714508
|
1d01890ceb33c9a214d1b92cca57cad014bea51e
| 3,064
|
py
|
Python
|
src/waldur_core/structure/management/commands/dumpusers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26
|
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_core/structure/management/commands/dumpusers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14
|
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_core/structure/management/commands/dumpusers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32
|
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
from collections import OrderedDict
import prettytable
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from waldur_core.structure import models
User = get_user_model()
USER_COLUMNS = OrderedDict(
[
# (Column name, User fields)
('Full name, Civil number', ('full_name', 'civil_number')),
('Email, Phone nr.', ('email', 'phone_number')),
('Job title', ('job_title',)),
('Staff, Support', ('is_staff', 'is_support',)),
]
)
# in chars
COLUMN_MAX_WIDTH = 25
def format_string_to_column_size(string):
if len(string) <= COLUMN_MAX_WIDTH:
return string
formatted = '\n'.join(
string[i : i + COLUMN_MAX_WIDTH]
for i in range(0, len(string), COLUMN_MAX_WIDTH)
)
if isinstance(formatted, bytes):
formatted = str(formatted, errors='replace')
return formatted
def to_string(value):
if isinstance(value, bool):
return 'Yes' if value else 'No'
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
return format_string_to_column_size(value)
elif isinstance(value, list):
strings = [to_string(v) for v in value]
result = ', '.join(strings)
if len(result) > COLUMN_MAX_WIDTH:
return '\n'.join(strings)
return result
return format_string_to_column_size(str(value))
class Command(BaseCommand):
help = "Dumps information about users, their organizations and projects."
def add_arguments(self, parser):
parser.add_argument(
'-o',
'--output',
dest='output',
default=None,
help='Specifies file to which the output is written. The output will be printed to stdout by default.',
)
def handle(self, *args, **options):
# fetch objects
users = User.objects.all()
project_roles = models.ProjectPermission.objects.filter(is_active=True)
customer_roles = models.CustomerPermission.objects.filter(is_active=True)
# build table
columns = list(USER_COLUMNS.keys()) + ['Organizations', 'Projects']
table = prettytable.PrettyTable(columns, hrules=prettytable.ALL)
for user in users:
user_customers = to_string(list(customer_roles.filter(user=user)))
user_projects = to_string(list(project_roles.filter(user=user)))
row = [
to_string(
[
getattr(user, f)
for f in fields
if getattr(user, f) not in ('', None)
]
)
for fields in USER_COLUMNS.values()
]
row += [user_customers, user_projects]
table.add_row(row)
# output
if options['output'] is None:
self.stdout.write(table.get_string())
return
with open(options['output'], 'w') as output_file:
output_file.write(table.get_string())
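# Typical invocation sketch (the command name comes from this file's name):
#   python manage.py dumpusers
#   python manage.py dumpusers --output users.txt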
| 30.949495
| 115
| 0.598238
|
b7dd7ea6ea2c6f190b4c4795b2014cdf0ba59f84
| 3,393
|
py
|
Python
|
get_ttid.py
|
CCNU-internship-Dec2020/Scrape-IMDb-By-Searching-Name
|
4262727e68a0120c3ce6918acb74103e56ad977d
|
[
"MIT"
] | null | null | null |
get_ttid.py
|
CCNU-internship-Dec2020/Scrape-IMDb-By-Searching-Name
|
4262727e68a0120c3ce6918acb74103e56ad977d
|
[
"MIT"
] | null | null | null |
get_ttid.py
|
CCNU-internship-Dec2020/Scrape-IMDb-By-Searching-Name
|
4262727e68a0120c3ce6918acb74103e56ad977d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Copyright © 2021 - wwyqianqian <wwyqianqian@mails.ccnu.edu.com>
from bs4 import BeautifulSoup
import json
import requests
import re
import os
movieid = list()
title = list()
search_mov_url = list()
genres = list()
for line in open("movies.dat", encoding = "ISO-8859-1"):
movieid.append(line.rstrip().split("::")[0])
title.append(line.rstrip().split("::")[1])
genres.append(line.rstrip().split("::")[2])
for i in title:
line = str(i)
search_name = line.replace(' ', '+')
search_name = search_name.replace('(', '%28')
search_name = search_name.replace(')', '%29')
search_name = search_name.replace("'", '%27')
search_name = search_name.replace('&', '%26')
search_name = search_name.replace('!', '%21')
search_name = search_name.replace(',', '%2C')
search_name = search_name.replace(':', '%3A')
search_name = search_name.replace('?', '%3F')
search_name = search_name.replace('/', '%2F')
# print(title[movie_count], search_name) // now the movie names are encoded
# https://www.imdb.com/find?s=tt&q=
encode_url = "https://www.imdb.com/find?s=tt&q=" + search_name
search_mov_url.append(search_name)
# mov_search_url_list = list()
# def get_url_list(rootdir):
# with open(rootdir, 'r') as file_to_read:
# while True:
# line = file_to_read.readline()
# if not line:
# break
# line = line.strip('\n')
# mov_search_url_list.append(line)
# return mov_search_url_list
def get_title_id(index, url):
try:
response = requests.get(url)
        soup = BeautifulSoup(response.text, "html.parser")  # explicit parser avoids bs4's "no parser specified" warning
# print(soup)
    except Exception as e:
        print("Get Movie URL failed!")
        with open('err_log.txt', 'a', encoding='utf-8') as f:
            x = index + 1
            f.write(str(x) + ": " + str(e) + '\n')
        return  # no page was fetched, so skip the title-id lookup below
try:
web_table = soup.find("table", attrs={"class": "findList"})
# print(web_table.a)
# <a href="/title/tt0208874/"><img src="https://m.media-amazon.com/images/M/MV5BYTIzYTA5MDEtY2I0OS00OGJhLTlmMDctZWRlMGRjYzAxZDQzXkEyXkFqcGdeQXVyMTQxNzMzNDI@._V1_UX32_CR0,0,32,44_AL_.jpg"/></a>
title_id_a_str = str(web_table.a)
title_id = title_id_a_str.split('/')[2]
print(index)
print(title_id) # tt0208874
with open('excess_log.txt', 'a+', encoding='utf-8') as f:
f.write(movieid[index] + "::" + title[index] + "::" + genres[index] + "::" + search_mov_url[index] + "::" + "https://www.imdb.com/title/" + title_id + "/" +'\n')
except Exception as e:
print("Find title id failed!")
with open('err_log.txt', 'a', encoding='utf-8') as f:
x = index + 1
f.write(str(x) + ": "+ str(e) + '\n')
def main():
# get_url_list("searchMovUrlList_byLine.txt") #get a list with 3883 items
for i in range(len(search_mov_url)):
get_title_id(i, "https://www.imdb.com/find?s=tt&q=" + search_mov_url[i])
main()
# output:
# file:
# - excess_log.txt (2::Jumanji (1995)::Adventure|Children's|Fantasy::Jumanji+%281995%29::https://www.imdb.com/title/tt0113497/)
# - err_log.txt
# log :
# 0
# tt0114709
# 1
# tt0113497
# 2
# tt0113228
# 3
# tt0114885
# 4
# tt0113041
| 29.25
| 200
| 0.587681
|
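The long chain of str.replace calls in the script above is a hand-rolled percent-encoding of the movie title for the IMDb search URL. A minimal sketch of the equivalent standard-library approach is shown below; it is an alternative illustration, not what the script itself uses, and imdb_search_url is a hypothetical helper name.
from urllib.parse import quote_plus

def imdb_search_url(title):
    # quote_plus percent-encodes reserved characters and turns spaces into '+',
    # which covers every character the script replaces by hand
    return "https://www.imdb.com/find?s=tt&q=" + quote_plus(title)

print(imdb_search_url("Jumanji (1995)"))
# https://www.imdb.com/find?s=tt&q=Jumanji+%281995%29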
e6d5bebff426aad31ae2f1f437150bffea7de45d
| 257
|
py
|
Python
|
homeassistant/components/sensibo/const.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 1
|
2018-08-01T02:37:08.000Z
|
2018-08-01T02:37:08.000Z
|
homeassistant/components/sensibo/const.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 18
|
2021-11-24T06:26:13.000Z
|
2022-03-31T06:25:15.000Z
|
homeassistant/components/sensibo/const.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 3
|
2021-11-14T13:29:33.000Z
|
2021-12-27T17:05:22.000Z
|
"""Constants for Sensibo."""
import logging
from homeassistant.const import Platform
LOGGER = logging.getLogger(__package__)
DEFAULT_SCAN_INTERVAL = 60
DOMAIN = "sensibo"
PLATFORMS = [Platform.CLIMATE]
ALL = ["all"]
DEFAULT_NAME = "Sensibo"
TIMEOUT = 8
| 17.133333
| 40
| 0.754864
|
b6bb31f979b17496ca0d2c805dde9260f9a07552
| 4,536
|
py
|
Python
|
gui/kivy/uix/dialogs/fee_dialog.py
|
OleksandrBlack/electrum-safecoin
|
71a383635f9f2c3b50649376daabb6ba610431c2
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/fee_dialog.py
|
OleksandrBlack/electrum-safecoin
|
71a383635f9f2c3b50649376daabb6ba610431c2
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/fee_dialog.py
|
OleksandrBlack/electrum-safecoin
|
71a383635f9f2c3b50649376daabb6ba610431c2
|
[
"MIT"
] | 1
|
2020-01-31T22:01:23.000Z
|
2020-01-31T22:01:23.000Z
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_safecoin_gui.kivy.i18n import _
Builder.load_string('''
<FeeDialog@Popup>
id: popup
title: _('Transaction Fees')
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
method: 0
BoxLayout:
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Label:
text: _('Method') + ':'
Button:
text: _('Mempool') if root.method == 2 else _('ETA') if root.method == 1 else _('Static')
background_color: (0,0,0,0)
bold: True
on_release:
root.method = (root.method + 1) % 2
root.update_slider()
root.update_text()
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Label:
text: (_('Target') if root.method > 0 else _('Fee')) + ':'
Label:
id: fee_target
text: ''
Slider:
id: slider
range: 0, 4
step: 1
on_value: root.on_slider(self.value)
Widget:
size_hint: 1, 0.5
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
TopLabel:
id: fee_estimate
text: ''
font_size: '14dp'
Widget:
size_hint: 1, 0.5
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.on_ok()
root.dismiss()
''')
class FeeDialog(Factory.Popup):
def __init__(self, app, config, callback):
Factory.Popup.__init__(self)
self.app = app
self.config = config
self.callback = callback
mempool = self.config.use_mempool_fees()
dynfees = self.config.is_dynfee()
self.method = (2 if mempool else 1) if dynfees else 0
self.update_slider()
self.update_text()
def update_text(self):
pos = int(self.ids.slider.value)
dynfees, mempool = self.get_method()
if self.method == 2:
fee_rate = self.config.depth_to_fee(pos)
target, estimate = self.config.get_fee_text(pos, dynfees, mempool, fee_rate)
msg = 'In the current network conditions, in order to be positioned %s, a transaction will require a fee of %s.' % (target, estimate)
elif self.method == 1:
fee_rate = self.config.eta_to_fee(pos)
target, estimate = self.config.get_fee_text(pos, dynfees, mempool, fee_rate)
msg = 'In the last few days, transactions that confirmed %s usually paid a fee of at least %s.' % (target.lower(), estimate)
else:
fee_rate = self.config.static_fee(pos)
target, estimate = self.config.get_fee_text(pos, dynfees, True, fee_rate)
msg = 'In the current network conditions, a transaction paying %s would be positioned %s.' % (target, estimate)
self.ids.fee_target.text = target
self.ids.fee_estimate.text = msg
def get_method(self):
dynfees = self.method > 0
mempool = self.method == 2
return dynfees, mempool
def update_slider(self):
slider = self.ids.slider
dynfees, mempool = self.get_method()
maxp, pos, fee_rate = self.config.get_fee_slider(dynfees, mempool)
slider.range = (0, maxp)
slider.step = 1
slider.value = pos
def on_ok(self):
value = int(self.ids.slider.value)
dynfees, mempool = self.get_method()
self.config.set_key('dynamic_fees', dynfees, False)
self.config.set_key('mempool_fees', False, False)
if dynfees:
if mempool:
self.config.set_key('depth_level', value, True)
else:
self.config.set_key('fee_level', value, True)
else:
self.config.set_key('fee_per_kb', self.config.static_fee(value), True)
self.callback()
def on_slider(self, value):
self.update_text()
| 34.363636
| 145
| 0.544533
|
b0f125d80fb7ade450d606b59202f6efdb882560
| 256
|
py
|
Python
|
delete_table.py
|
1021ky/try_dynamodb
|
6d254c139a62f578761639dd7deab2c7c44ab6a9
|
[
"MIT"
] | 2
|
2020-03-23T18:24:44.000Z
|
2020-03-25T21:25:52.000Z
|
delete_table.py
|
1021ky/try_dynamodb
|
6d254c139a62f578761639dd7deab2c7c44ab6a9
|
[
"MIT"
] | 2
|
2021-08-05T11:44:53.000Z
|
2021-08-05T11:44:59.000Z
|
delete_table.py
|
1021ky/try_dynamodb
|
6d254c139a62f578761639dd7deab2c7c44ab6a9
|
[
"MIT"
] | 1
|
2020-03-23T18:24:52.000Z
|
2020-03-23T18:24:52.000Z
|
import boto3
client = boto3.client('dynamodb', region_name='us-east-1')
try:
resp = client.delete_table(
TableName="Books",
)
print("Table deleted successfully!")
except Exception as e:
print("Error deleting table:")
print(e)
| 19.692308
| 58
| 0.660156
|
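DeleteTable in the script above only starts the deletion; DynamoDB removes the table asynchronously. A minimal sketch of waiting for the deletion to finish, assuming the same Books table and region as above, is:
import boto3

client = boto3.client('dynamodb', region_name='us-east-1')

# poll until the table no longer exists before recreating or reusing the name
waiter = client.get_waiter('table_not_exists')
waiter.wait(TableName='Books', WaiterConfig={'Delay': 5, 'MaxAttempts': 20})
print("Table is fully deleted.")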
1e2d6f4d5f1601dfb52b9e477027b27c35f26274
| 1,294
|
py
|
Python
|
polls/migrations/0001_initial.py
|
armonkahil/djangoPolls
|
e610b8e3c123d064020c592f1cdf29ccfec3fdc4
|
[
"MIT"
] | null | null | null |
polls/migrations/0001_initial.py
|
armonkahil/djangoPolls
|
e610b8e3c123d064020c592f1cdf29ccfec3fdc4
|
[
"MIT"
] | null | null | null |
polls/migrations/0001_initial.py
|
armonkahil/djangoPolls
|
e610b8e3c123d064020c592f1cdf29ccfec3fdc4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-05-03 11:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
| 35.944444
| 118
| 0.613601
|
ebdcf5be8c31a69cd6cb0c1e20cada28056f92c1
| 5,405
|
py
|
Python
|
rl_memory/tests/test_dqn.py
|
Unique-Divine/test-repo
|
e7f82e8007df7cc210c35ffaf0f11855c8327c2f
|
[
"MIT"
] | null | null | null |
rl_memory/tests/test_dqn.py
|
Unique-Divine/test-repo
|
e7f82e8007df7cc210c35ffaf0f11855c8327c2f
|
[
"MIT"
] | 1
|
2021-11-12T01:20:30.000Z
|
2021-11-12T01:20:30.000Z
|
rl_memory/tests/test_dqn.py
|
Unique-Divine/test-repo
|
e7f82e8007df7cc210c35ffaf0f11855c8327c2f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Test module for all classes related to Deep Q-Learning, Deep Q-Network (DQN),
and Double DQN. See module `rl_memory.rl_algos.dqn`.
"""
import os
import sys
import torch
import random
import pytest
import warnings; warnings.filterwarnings("ignore")
from rl_memory.rlm_env import environment
from rl_memory.rl_algos import dqn_algo
# TODO
from rl_memory.experiments import dqn_experiments
import numpy as np
import rl_memory as rlm
from typing import List, Tuple, Optional
from torch import Tensor
Array = np.ndarray
class TestDQNInits:
"""Verifies that all of the abstract classes and concrete classes of
the vanilla policy gradient instantiate correctly.
"""
@staticmethod
def init_env() -> rlm.Env:
env: rlm.Env = environment.Env(
grid_shape=(15,15), n_goals=4, hole_pct = 0.3)
env.reset()
return env
def default_experiment_setup(self) \
-> Tuple[rlm.Env, dqn_algo.DQN]:
env: rlm.Env = self.init_env()
obs: rlm.Observation = environment.Observation(env = env)
obs_size = obs.size()
network_h_params = dqn_algo.NNHyperParameters(lr = 1e-3)
dqn = dqn_algo.DQN(
obs_size = obs_size, action_dim = len(env.action_space),
h_params = network_h_params)
return env, dqn
"""
def test_placeholder(self):
return 'yuh'
raise NotImplementedError
"""
def test_init_NNHyperParameters(self):
network_hparams = dqn_algo.NNHyperParameters(lr = 1e-3)
assert network_hparams
def test_init_DQNTransferLearning(self):
transfer_mgmt = dqn_algo.DQNTransferLearning(transfer_freq = 2)
assert transfer_mgmt
def test_DQNAlgo(self):
env, dqn = self.default_experiment_setup()
rl_algo = dqn_algo.DQNAlgo(
dqn=dqn,
env_like = env,)
rl_algo.run_algo(num_episodes = 5, max_num_scenes = 3)
def test_DQNAlgo_w_transfer(self):
env, dqn = self.default_experiment_setup()
transfer_freqs: List[int] = [1, 2, 3]
for transfer_freq in transfer_freqs:
rl_algo = dqn_algo.DQNAlgo(
dqn=dqn,
env_like = env,
transfer_mgmt = dqn_algo.DQNTransferLearning(
transfer_freq = transfer_freq))
rl_algo.run_algo(num_episodes = 5, max_num_scenes = 3)
class TestPretrainingExperiment:
@staticmethod
def init_env() -> rlm.Env:
env: rlm.Env = environment.Env(
grid_shape=(15,15), n_goals=4, hole_pct = 0.3)
env.reset()
return env
def default_experiment_setup(self) \
-> Tuple[rlm.Env, dqn_algo.DQN]:
env: rlm.Env = self.init_env()
obs: rlm.Observation = environment.Observation(
env = env)
obs_size = obs.size()
network_h_params = dqn_algo.NNHyperParameters(lr = 1e-3)
dqn = dqn_algo.DQN(
obs_size = obs_size, action_dim = len(env.action_space),
h_params = network_h_params)
return env, dqn
def test_init_PretrainingExperiment(self):
env, dqn = self.default_experiment_setup()
experiment = dqn_experiments.PretrainingExperiment(
env = env,
num_episodes = 3, transfer_freq = 1 )
assert experiment
def test_easy_env(self):
env, dqn = self.default_experiment_setup()
experiment = dqn_experiments.PretrainingExperiment(
env = env,
num_episodes = 3, transfer_freq = 1 )
easy_env: rlm.Env = experiment.easy_env()
assert isinstance(easy_env, environment.Env)
def test_pretrain_on_easy_env(self):
env, dqn = self.default_experiment_setup()
experiment = dqn_experiments.PretrainingExperiment(
env = env,
num_episodes = 3, transfer_freq = 1 )
experiment.pretrain_on_easy_env(dqn = dqn)
def test_pretrain_to_threshold(self):
env, dqn = self.default_experiment_setup()
experiment = dqn_experiments.PretrainingExperiment(
env = env,
num_episodes = 100, transfer_freq = 1 )
dqn = experiment.pretrain_to_threshold(
dqn = dqn)
return dqn
def test_experiment_vpg_transfer(self):
return 'yuh' # TODO
# Check both with default policy net
# and with a custom one
raise NotImplementedError
class TestEvaluateDQN:
def test_train(self):
"""Integration test on whether DQNAlgo runs for training."""
env = environment.Env()
env.reset()
experiment = dqn_experiments.DQNEvalExperiment()
experiment.train(env = env, num_episodes = 2)
def test_test(self):
"""Integration test on whether DQNAlgo runs for validation."""
env = environment.Env()
env.reset()
experiment = dqn_experiments.DQNEvalExperiment()
        train_algo: dqn_algo.DQNAlgo = experiment.train(env = env, num_episodes = 1)
experiment.test(rl_algo = train_algo, env = env, num_episodes = 1)
def test_plot_results(self):
env = environment.Env()
env.reset()
experiment = dqn_experiments.DQNEvalExperiment()
experiment.main(n_episodes_train = 500, n_episodes_test = 20)
| 33.571429
| 80
| 0.633673
|
59c23900fa9d77270082f0a2e8b06ca5cc723500
| 13
|
py
|
Python
|
helloworld.py
|
rock304/pythonExamples
|
71635bfa402cf7092361d5f1dfed532ed4513e2d
|
[
"Apache-2.0"
] | 3
|
2020-03-21T04:37:50.000Z
|
2021-08-14T07:31:13.000Z
|
helloworld.py
|
rock304/pythonExamples
|
71635bfa402cf7092361d5f1dfed532ed4513e2d
|
[
"Apache-2.0"
] | null | null | null |
helloworld.py
|
rock304/pythonExamples
|
71635bfa402cf7092361d5f1dfed532ed4513e2d
|
[
"Apache-2.0"
] | 3
|
2020-02-04T13:28:47.000Z
|
2020-06-10T01:34:19.000Z
|
print "hello"
| 13
| 13
| 0.769231
|
43d74a2993cea0d977a99bf0ec7f485be635dc9f
| 26,430
|
py
|
Python
|
yolov5/utils/general.py
|
brekkanegg/vinbigdata-cxr
|
e52658d250d973ef3385d8e55ae3c4f236052eca
|
[
"MIT"
] | null | null | null |
yolov5/utils/general.py
|
brekkanegg/vinbigdata-cxr
|
e52658d250d973ef3385d8e55ae3c4f236052eca
|
[
"MIT"
] | null | null | null |
yolov5/utils/general.py
|
brekkanegg/vinbigdata-cxr
|
e52658d250d973ef3385d8e55ae3c4f236052eca
|
[
"MIT"
] | null | null | null |
# YOLOv5 general utils
import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
import yaml
from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile="long")
np.set_printoptions(
linewidth=320, formatter={"float_kind": "{:11.5g}".format}
) # format short g, %precision=5
cv2.setNumThreads(
0
) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ["NUMEXPR_MAX_THREADS"] = str(min(os.cpu_count(), 8)) # NumExpr max threads
def set_logging(rank=-1):
logging.basicConfig(
format="%(message)s", level=logging.INFO if rank in [-1, 0] else logging.WARN
)
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
def get_latest_run(search_dir="."):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ""
def isdocker():
# Is environment a Docker container
return Path("/workspace").exists() # or Path('/.dockerenv').exists()
def emojis(str=""):
# Return platform-dependent emoji-safe version of string
return (
str.encode().decode("ascii", "ignore")
if platform.system() == "Windows"
else str
)
def check_online():
# Check internet connectivity
import socket
try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
return True
except OSError:
return False
def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr("github: "), end="")
try:
assert Path(".git").exists(), "skipping check (not a git repository)"
assert not isdocker(), "skipping check (Docker image)"
assert check_online(), "skipping check (offline)"
cmd = "git fetch && git config --get remote.origin.url"
url = (
subprocess.check_output(cmd, shell=True).decode().strip().rstrip(".git")
) # github repo url
branch = (
subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True)
.decode()
.strip()
) # checked out
n = int(
subprocess.check_output(
f"git rev-list {branch}..origin/master --count", shell=True
)
) # commits behind
if n > 0:
s = (
f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. "
f"Use 'git pull' to update or 'git clone {url}' to download latest."
)
else:
s = f"up to date with {url} ✅"
print(emojis(s)) # emoji-safe
except Exception as e:
print(e)
def check_requirements(file="requirements.txt", exclude=()):
# Check installed dependencies meet requirements
import pkg_resources as pkg
prefix = colorstr("red", "bold", "requirements:")
file = Path(file)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
n = 0 # number of packages updates
requirements = [
f"{x.name}{x.specifier}"
for x in pkg.parse_requirements(file.open())
if x.name not in exclude
]
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
n += 1
print(
f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update..."
)
print(
subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()
)
if n: # if packages updated
s = (
f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n"
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
)
print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print(
"WARNING: --img-size %g must be multiple of max stride %g, updating to %g"
% (img_size, s, new_size)
)
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not isdocker(), "cv2.imshow() is disabled in Docker environments"
cv2.imshow("test", np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(
f"WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}"
)
return False
def check_file(file):
# Search for file if not found
if os.path.isfile(file) or file == "":
return file
else:
files = glob.glob("./**/" + file, recursive=True) # find file
assert len(files), "File Not Found: %s" % file # assert file was found
assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (
file,
files,
) # assert unique
return files[0] # return file
def check_dataset(dict):
# Download dataset if not found locally
val, s = dict.get("val"), dict.get("download")
if val and len(val):
val = [
Path(x).resolve() for x in (val if isinstance(val, list) else [val])
] # val path
if not all(x.exists() for x in val):
print(
"\nWARNING: Dataset not found, nonexistent paths: %s"
% [str(x) for x in val if not x.exists()]
)
if s and len(s): # download script
print("Downloading %s ..." % s)
if s.startswith("http") and s.endswith(".zip"): # URL
f = Path(s).name # filename
torch.hub.download_url_to_file(s, f)
r = os.system("unzip -q %s -d ../ && rm %s" % (f, f)) # unzip
else: # bash script
r = os.system(s)
print(
"Dataset autodownload %s\n" % ("success" if r == 0 else "failure")
) # analyze return value
else:
raise Exception("Dataset not found.")
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = (
input if len(input) > 1 else ("blue", "bold", input[0])
) # color arguments, string
colors = {
"black": "\033[30m", # basic colors
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"magenta": "\033[35m",
"cyan": "\033[36m",
"white": "\033[37m",
"bright_black": "\033[90m", # bright colors
"bright_red": "\033[91m",
"bright_green": "\033[92m",
"bright_yellow": "\033[93m",
"bright_blue": "\033[94m",
"bright_magenta": "\033[95m",
"bright_cyan": "\033[96m",
"bright_white": "\033[97m",
"end": "\033[0m", # misc
"bold": "\033[1m",
"underline": "\033[4m",
}
return "".join(colors[x] for x in args) + f"{string}" + colors["end"]
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array(
[np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]
)
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
27,
28,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
67,
70,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
84,
85,
86,
87,
88,
89,
90,
]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = (
x[inside],
y[inside],
)
return (
np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))
) # cls, xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = (
np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)])
.reshape(2, -1)
.T
) # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(
img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]
) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (
img1_shape[0] - img0_shape[0] * gain
) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (
torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)
).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(
b1_x1, b2_x1
) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = (
(b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2
+ (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2
) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif (
CIoU
): # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(
torch.atan(w2 / h2) - torch.atan(w1 / h1), 2
)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (
(
torch.min(box1[:, None, 2:], box2[:, 2:])
- torch.max(box1[:, None, :2], box2[:, :2])
)
.clamp(0)
.prod(2)
)
return inter / (
area1[:, None] + area2 - inter
) # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (
wh1.prod(2) + wh2.prod(2) - inter
) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(
prediction,
conf_thres=0.25,
iou_thres=0.45,
classes=None,
agnostic=False,
multi_label=True,
labels=(),
merge=False,
):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
# merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(
1, keepdim=True
) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f"WARNING: NMS time limit {time_limit}s exceeded")
break # time limit exceeded
return output
def strip_optimizer(
f="best.pt", s=""
): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device("cpu"))
if x.get("ema"):
x["model"] = x["ema"] # replace model with ema
for k in "optimizer", "training_results", "wandb_id", "ema", "updates": # keys
x[k] = None
x["epoch"] = -1
x["model"].half() # to FP16
for p in x["model"].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1e6 # filesize
print(
f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB"
)
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
url = "gs://%s/evolve.txt" % bucket
if gsutil_getsize(url) > (
os.path.getsize("evolve.txt") if os.path.exists("evolve.txt") else 0
):
os.system(
"gsutil cp %s ." % url
) # download evolve.txt if larger than local
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
results = tuple(x[0, :7])
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
if bucket:
os.system("gsutil cp evolve.txt %s gs://%s" % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]) : int(a[3]), int(a[0]) : int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(
1
) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=True, sep=""):
# Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
path = Path(path) # os-agnostic
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
return f"{path}{sep}{n}" # update path
| 33.32913
| 118
| 0.536625
|
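The box utilities above convert between (center x, center y, width, height) and corner coordinates and compute pairwise IoU. The following is a small worked sketch; the import path utils.general is assumed from the module's own imports, and the numbers in the comments are computed by hand.
import torch
from utils.general import xywh2xyxy, box_iou  # module path assumed, matching the file's own imports

# one ground-truth box and one prediction, both as (center x, center y, w, h)
gt   = xywh2xyxy(torch.tensor([[50., 50., 40., 40.]]))   # -> [[30., 30., 70., 70.]]
pred = xywh2xyxy(torch.tensor([[60., 60., 40., 40.]]))   # -> [[40., 40., 80., 80.]]

# box_iou takes xyxy boxes and returns an N x M matrix of pairwise IoUs
iou = box_iou(gt, pred)
print(iou)  # intersection 30*30 = 900, union 1600 + 1600 - 900 = 2300 -> ~0.3913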
f3d3e6658a457335b640f8787eb39222e7b07a95
| 1,299
|
py
|
Python
|
src/test.py
|
candYgene/pbg-ld
|
7f502c44d21c4ecda3fed6d3b531d4c98fdedde6
|
[
"Apache-2.0"
] | 1
|
2020-07-22T07:39:50.000Z
|
2020-07-22T07:39:50.000Z
|
src/test.py
|
candYgene/pbg-ld
|
7f502c44d21c4ecda3fed6d3b531d4c98fdedde6
|
[
"Apache-2.0"
] | 45
|
2017-07-12T07:19:08.000Z
|
2019-09-02T12:44:38.000Z
|
src/test.py
|
candYgene/pbg-ld
|
7f502c44d21c4ecda3fed6d3b531d4c98fdedde6
|
[
"Apache-2.0"
] | 4
|
2017-07-12T07:12:16.000Z
|
2019-07-09T09:34:29.000Z
|
from __future__ import print_function
from SPARQLWrapper import SPARQLWrapper, JSON
import sys
infile = sys.argv[1] if len(sys.argv) > 1 else "graphs.txt"
endpoint = sys.argv[2] if len(sys.argv) > 2 else "http://localhost:8890/sparql"
graphs = dict()
try:
with open(infile, "r") as fin:
for line in fin:
graphs[line.strip()] = 0
except IOError as err:
sys.exit(err)
if len(graphs) == 0:
sys.exit("No graph URIs found.")
try:
sparql = SPARQLWrapper(endpoint)
sparql.setQuery("""
SELECT
?g COUNT(*) AS ?n
WHERE {
GRAPH ?g { ?s ?p ?o }
}
GROUP BY ?g
""")
sparql.setReturnFormat(JSON)
res = sparql.query().convert()
for r in res["results"]["bindings"]:
g = r['g']['value']
n = int(r['n']['value'])
if g in graphs:
graphs[g] = n
except Exception as err:
    sys.exit("Failed to connect to the SPARQL endpoint '{0}': {1}".format(endpoint, err))
exit_code = 0
print("# graph_uri\tn_triples")
for g,n in sorted(graphs.items(), key=lambda x: x[1], reverse=True):
if n == 0:
exit_code = 1
print("{0}\t{1}".format(g,n), file=sys.stdout)
if exit_code != 0:
print("\n*** ERROR: Ingested RDF graph(s) must not be empty. ***", file=sys.stderr)
sys.exit(exit_code)
| 25.470588
| 87
| 0.585835
|
afffe5e3fac9ddff7a7c35ae0dd8c2b800b27d05
| 2,882
|
py
|
Python
|
populous/inlines/models.py
|
caiges/populous
|
d07094f9d6b2528d282ed99af0063002480bc00b
|
[
"BSD-3-Clause"
] | 2
|
2016-05-09T01:17:08.000Z
|
2017-07-18T23:35:01.000Z
|
populous/inlines/models.py
|
caiges/populous
|
d07094f9d6b2528d282ed99af0063002480bc00b
|
[
"BSD-3-Clause"
] | null | null | null |
populous/inlines/models.py
|
caiges/populous
|
d07094f9d6b2528d282ed99af0063002480bc00b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.template.defaultfilters import capfirst
from populous.inlines.managers import RegisteredInlineManager, RegisteredInlineFieldManager
from populous.inlines.utils import get_absolute_schema_path
class RecurringInline(models.Model):
title = models.CharField(max_length=200, unique=True)
content = models.TextField(help_text='Raw HTML is allowed.')
class Meta:
ordering = ('title',)
def __unicode__(self):
return self.title
class RegisteredInline(models.Model):
name = models.CharField(max_length=500)
description = models.TextField(blank=True)
author = models.CharField(max_length=800, blank=True)
app_label = models.CharField(max_length=500, editable=False)
class_name = models.CharField(max_length=500, editable=False)
inline_name = models.SlugField(max_length=500, editable=False)
objects = RegisteredInlineManager()
class Meta:
verbose_name = 'inline'
unique_together = (('app_label', 'class_name', 'inline_name'),)
def __unicode__(self):
return self.name
@models.permalink
def get_form_url(self):
return ('inlines-admin-form', (), {
'app_label': self.app_label,
'inline_name': self.inline_name})
@property
def inline_class(self):
if not hasattr(self, '_inline_class'):
app_name = models.get_app(self.app_label).__name__
mod_name = "%sinlines" % app_name.rstrip('models')
mod = __import__(mod_name, fromlist=[mod_name])
setattr(self, '_inline_class', getattr(mod, self.class_name))
return getattr(self, '_inline_class')
def get_form(self):
return self.inline_class.form
## TODO: Currently field-level control doesn't work.
class RegisteredInlineField(models.Model):
app_label = models.CharField(max_length=500, editable=False)
model_name = models.CharField(max_length=500, editable=False)
field_name = models.CharField(max_length=500, editable=False)
schema_path = models.CharField(max_length=800, editable=False)
objects = RegisteredInlineFieldManager()
class Meta:
verbose_name = 'inline field'
unique_together = (('app_label', 'model_name', 'field_name'),)
def __unicode__(self):
return u"%s.%s" % (capfirst(self.model_name), self.field_name)
def get_absolute_schema_path(self):
return get_absolute_schema_path(self.schema_path)
class AllowedField(models.Model):
inline = models.ForeignKey(RegisteredInline)
field = models.ForeignKey(RegisteredInlineField)
sites = models.ManyToManyField(Site)
def __unicode__(self):
return unicode(self.field)
| 34.309524
| 91
| 0.696044
|
5873a39b0334c765605e178044e052cf95dfcd91
| 3,452
|
py
|
Python
|
openstack_dashboard/contrib/trove/content/database_clusters/tabs.py
|
Tesora-Release/tesora-horizon
|
4b0f26d48551783c1fe3ae362d1f8d27570195a9
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/contrib/trove/content/database_clusters/tabs.py
|
Tesora-Release/tesora-horizon
|
4b0f26d48551783c1fe3ae362d1f8d27570195a9
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/contrib/trove/content/database_clusters/tabs.py
|
Tesora-Release/tesora-horizon
|
4b0f26d48551783c1fe3ae362d1f8d27570195a9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.contrib.trove.content.database_clusters import tables
from openstack_dashboard.contrib.trove.content.databases import db_capability
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
def get_context_data(self, request):
return {"cluster": self.tab_group.kwargs['cluster']}
def get_template_name(self, request):
cluster = self.tab_group.kwargs['cluster']
template_file = ('project/database_clusters/_detail_overview_%s.html'
% self._get_template_type(cluster.datastore['type']))
try:
template.loader.get_template(template_file)
return template_file
except template.TemplateDoesNotExist:
# This datastore type does not have a template file
# Just use the base template file
return ('project/database_clusters/_detail_overview.html')
def _get_template_type(self, datastore):
if db_capability.is_datastax_enterprise(datastore):
return 'cassandra'
return datastore
class InstancesTab(tabs.TableTab):
table_classes = (tables.InstancesTable,)
name = _("Instances")
slug = "instances_tab"
cluster = None
template_name = "horizon/common/_detail_table.html"
preload = True
def get_instances_data(self):
cluster = self.tab_group.kwargs['cluster']
data = []
try:
instances = api.trove.cluster_get(self.request,
cluster.id).instances
for instance in instances:
instance_info = api.trove.instance_get(self.request,
instance['id'])
flavor_id = instance_info.flavor['id']
instance_info.full_flavor = api.trove.flavor_get(self.request,
flavor_id)
if "type" in instance:
instance_info.type = instance["type"]
if "ip" in instance:
instance_info.ip = instance["ip"]
if "hostname" in instance:
instance_info.hostname = instance["hostname"]
data.append(instance_info)
except Exception:
msg = _('Unable to get instances data.')
exceptions.handle(self.request, msg)
data = []
return data
class ClusterDetailTabs(tabs.TabGroup):
slug = "cluster_details"
tabs = (OverviewTab, InstancesTab)
sticky = True
| 37.521739
| 78
| 0.635284
|
41c2dd92b49e317be5383432d364b5a8675bcc62
| 24
|
py
|
Python
|
IMP Units/08 ----.py
|
andrestelex/Impallari-Fontlab-Macros
|
ed9f57715dd376b513ce1a7ddb4d683e893a849b
|
[
"Apache-2.0"
] | 10
|
2015-02-25T11:38:56.000Z
|
2020-11-19T22:47:08.000Z
|
IMP Units/08 ----.py
|
andrestelex/Impallari-Fontlab-Macros
|
ed9f57715dd376b513ce1a7ddb4d683e893a849b
|
[
"Apache-2.0"
] | 3
|
2015-06-24T05:03:06.000Z
|
2016-07-28T00:37:31.000Z
|
IMP Units/08 ----.py
|
andrestelex/Impallari-Fontlab-Macros
|
ed9f57715dd376b513ce1a7ddb4d683e893a849b
|
[
"Apache-2.0"
] | 5
|
2017-04-20T11:09:47.000Z
|
2021-05-19T22:17:53.000Z
|
#FLM: --------
pass
| 6
| 15
| 0.291667
|
eb7ab5f9055209ab40d8122acb7da5e53fcf587c
| 2,199
|
py
|
Python
|
guild/__init__.py
|
flamato/guildai
|
cd6ad6a6b45ea4ff7561318b222888aeb1a22942
|
[
"Apache-2.0"
] | 1
|
2019-05-31T14:44:07.000Z
|
2019-05-31T14:44:07.000Z
|
guild/__init__.py
|
flamato/guildai
|
cd6ad6a6b45ea4ff7561318b222888aeb1a22942
|
[
"Apache-2.0"
] | null | null | null |
guild/__init__.py
|
flamato/guildai
|
cd6ad6a6b45ea4ff7561318b222888aeb1a22942
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.6.0"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("six", "six"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
try:
_init_git_commit()
except (OSError, subprocess.CalledProcessError):
pass
else:
try:
_init_git_status()
except (OSError, subprocess.CalledProcessError):
pass
def _init_git_commit():
commit = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
globals()["__git_commit__"] = commit
def _init_git_status():
raw = _git_cmd("git -C \"%(repo)s\" status -s")
globals()["__git_status__"] = raw.split("\n") if raw else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
git_commit = globals().get("__git_commit__")
if git_commit:
git_status = globals().get("__git_status__", [])
workspace_changed_marker = "*" if git_status else ""
return "%s (dev %s%s)" % (__version__, git_commit,
workspace_changed_marker)
else:
return __version__
_try_init_git_attrs()
| 29.716216
| 77
| 0.65075
|
6931f789025c3a2955a885ac200628a869cf89f2
| 6,414
|
py
|
Python
|
SoftLayer/CLI/core.py
|
briancline/softlayer-python
|
679fb62ba2db095f0177f9d9488ff4a80c3b7387
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/core.py
|
briancline/softlayer-python
|
679fb62ba2db095f0177f9d9488ff4a80c3b7387
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/core.py
|
briancline/softlayer-python
|
679fb62ba2db095f0177f9d9488ff4a80c3b7387
|
[
"MIT"
] | null | null | null |
"""
SoftLayer.CLI.core
~~~~~~~~~~~~~~~~~~
Core for the SoftLayer CLI
:license: MIT, see LICENSE for more details.
"""
from __future__ import print_function
import logging
import sys
import types
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
import click
# pylint: disable=too-many-public-methods, broad-except, unused-argument
# pylint: disable=redefined-builtin, super-init-not-called
DEBUG_LOGGING_MAP = {
0: logging.CRITICAL,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG
}
VALID_FORMATS = ['table', 'raw', 'json']
DEFAULT_FORMAT = 'raw'
if sys.stdout.isatty():
DEFAULT_FORMAT = 'table'
class CommandLoader(click.MultiCommand):
"""Loads module for click."""
def __init__(self, *path, **attrs):
click.MultiCommand.__init__(self, **attrs)
self.path = path
def list_commands(self, ctx):
"""Get module for click."""
env = ctx.ensure_object(environment.Environment)
env.load()
return sorted(env.list_commands(*self.path))
def get_command(self, ctx, name):
"""Get command for click."""
env = ctx.ensure_object(environment.Environment)
env.load()
# Do alias lookup (only available for root commands)
if len(self.path) == 0:
name = env.resolve_alias(name)
new_path = list(self.path)
new_path.append(name)
module = env.get_command(*new_path)
if isinstance(module, types.ModuleType):
return CommandLoader(*new_path, help=module.__doc__)
else:
return module
@click.group(help="SoftLayer Command-line Client",
epilog="""To use most commands your SoftLayer
username and api_key need to be configured. The easiest way to do that is to
use: 'slcli setup'""",
cls=CommandLoader,
context_settings={'help_option_names': ['-h', '--help'],
'auto_envvar_prefix': 'SLCLI'})
@click.option('--format',
default=DEFAULT_FORMAT,
help="Output format",
type=click.Choice(VALID_FORMATS))
@click.option('--config', '-C',
required=False,
default=click.get_app_dir('softlayer', force_posix=True),
help="Config file location",
type=click.Path(resolve_path=True))
@click.option('--debug',
required=False,
default=None,
help="Sets the debug noise level",
type=click.Choice(sorted([str(key) for key
in DEBUG_LOGGING_MAP.keys()])))
@click.option('--verbose', '-v',
help="Sets the debug noise level",
type=click.IntRange(0, 3, clamp=True),
count=True)
@click.option('--timings',
required=False,
is_flag=True,
help="Time each API call and display after results")
@click.option('--proxy',
required=False,
help="HTTP[S] proxy to be use to make API calls")
@click.option('--really / --not-really', '-y',
is_flag=True,
required=False,
help="Confirm all prompt actions")
@click.option('--fixtures / --no-fixtures',
envvar='SL_FIXTURES',
is_flag=True,
required=False,
help="Use fixtures instead of actually making API calls")
@click.version_option(prog_name="slcli (SoftLayer Command-line)")
@environment.pass_env
def cli(env,
format='table',
config=None,
debug=0,
verbose=0,
proxy=None,
really=False,
fixtures=False,
**kwargs):
"""Main click CLI entry-point."""
# Set logging level
if debug is not None:
verbose = int(debug)
if verbose:
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.DEBUG))
    # Populate environment with client and set it as the context object
env.skip_confirmations = really
env.config_file = config
env.format = format
if env.client is None:
# Environment can be passed in explicitly. This is used for testing
if fixtures:
client = SoftLayer.BaseClient(
transport=SoftLayer.FixtureTransport(),
auth=None,
)
else:
# Create SL Client
client = SoftLayer.create_client_from_env(
proxy=proxy,
config_file=config,
)
env.client = client
env.vars['timings'] = SoftLayer.TimingTransport(env.client.transport)
env.client.transport = env.vars['timings']
@cli.resultcallback()
@environment.pass_env
def output_result(env, timings=False, *args, **kwargs):
"""Outputs the results returned by the CLI and also outputs timings."""
if timings and env.vars.get('timings'):
timing_table = formatting.Table(['service', 'method', 'time'])
calls = env.vars['timings'].get_last_calls()
for call, _, duration in calls:
timing_table.add_row([call.service, call.method, duration])
env.err(env.fmt(timing_table))
def main(reraise_exceptions=False, **kwargs):
"""Main program. Catches several common errors and displays them nicely."""
exit_status = 0
try:
cli.main(**kwargs)
except SoftLayer.SoftLayerAPIError as ex:
if 'invalid api token' in ex.faultString.lower():
print("Authentication Failed: To update your credentials,"
" use 'slcli config setup'")
exit_status = 1
else:
print(str(ex))
exit_status = 1
except SoftLayer.SoftLayerError as ex:
print(str(ex))
exit_status = 1
except exceptions.CLIAbort as ex:
print(str(ex.message))
exit_status = ex.code
except Exception:
if reraise_exceptions:
raise
import traceback
print("An unexpected error has occured:")
print(str(traceback.format_exc()))
print("Feel free to report this error as it is likely a bug:")
print(" https://github.com/softlayer/softlayer-python/issues")
exit_status = 1
sys.exit(exit_status)
if __name__ == '__main__':
main()
| 31.441176
| 79
| 0.605239
|
4e53f43c38ab2defe49bb29209da753dc1ae5733
| 811
|
py
|
Python
|
pesados_el_norte/core_app/urls.py
|
leohakim/new_pesados
|
624616b17c2d7ddaf71b67890a218213cab580f0
|
[
"MIT"
] | null | null | null |
pesados_el_norte/core_app/urls.py
|
leohakim/new_pesados
|
624616b17c2d7ddaf71b67890a218213cab580f0
|
[
"MIT"
] | null | null | null |
pesados_el_norte/core_app/urls.py
|
leohakim/new_pesados
|
624616b17c2d7ddaf71b67890a218213cab580f0
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from pesados_el_norte.core_app.views.caja import CajaListView
# from pesados_el_norte.users.views import (
# user_detail_view,
# user_redirect_view,
# user_update_view,
# )
app_name = "core"
urlpatterns = [
path(
"home/",
login_required(TemplateView.as_view(template_name="core/home.html")),
name="home",
),
path(
"caja/",
view=CajaListView.as_view(template_name='core/caja.html'),
name="caja",
),
# path("~redirect/", view=user_redirect_view, name="redirect"),
# path("~update/", view=user_update_view, name="update"),
# path("<str:username>/", view=user_detail_view, name="detail"),
]
| 27.965517
| 77
| 0.675709
|
404c0d67cc6f7672d10ce3ae29e9770491c05282
| 631
|
py
|
Python
|
examples/sharepoint/files/download_file_from_url.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
examples/sharepoint/files/download_file_from_url.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
examples/sharepoint/files/download_file_from_url.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
import os
import tempfile
from office365.sharepoint.files.file import File
from tests import test_client_credentials, test_site_url
abs_file_url = "{site_url}sites/team/Shared Documents/big_buck_bunny.mp4".format(site_url=test_site_url)
with tempfile.TemporaryDirectory() as local_path:
file_name = os.path.basename(abs_file_url)
with open(os.path.join(local_path, file_name), 'wb') as local_file:
file = File.from_url(abs_file_url).with_credentials(test_client_credentials).download(local_file).execute_query()
print("'{0}' file has been downloaded into {1}".format(file.serverRelativeUrl, local_file.name))
| 48.538462
| 121
| 0.795563
|
2455c6c1aefd34aed0f73f88927404a14b076ca4
| 1,484
|
py
|
Python
|
py_grpc_profile/aio/server/interceptor.py
|
fossabot/py-grpc-profile
|
c68540891efb06b3bb8d6beb54238bd9e03c40c8
|
[
"Apache-2.0"
] | null | null | null |
py_grpc_profile/aio/server/interceptor.py
|
fossabot/py-grpc-profile
|
c68540891efb06b3bb8d6beb54238bd9e03c40c8
|
[
"Apache-2.0"
] | 2
|
2021-03-23T23:49:14.000Z
|
2022-02-11T03:38:44.000Z
|
py_grpc_profile/aio/server/interceptor.py
|
fossabot/py-grpc-profile
|
c68540891efb06b3bb8d6beb54238bd9e03c40c8
|
[
"Apache-2.0"
] | 1
|
2021-03-23T23:47:17.000Z
|
2021-03-23T23:47:17.000Z
|
from typing import Awaitable, Callable, Optional
import grpc
from py_grpc_profile.adapter import Adapter, CProfileAdapter
from py_grpc_profile.server.interceptor import get_rcp_handler, split_method_call
class ProfileInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, profiler: Optional[Adapter] = None):
if profiler is None:
profiler = CProfileAdapter()
self.profiler = profiler
async def intercept_service(
self,
continuation: Callable[
[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]
],
handler_call_details: grpc.HandlerCallDetails,
) -> grpc.RpcMethodHandler:
handler = await continuation(handler_call_details)
behavior, handler_factory = get_rcp_handler(handler)
def _intercept(request_or_iterator, servicer_context):
grpc_service_name, grpc_method_name = split_method_call(
handler_call_details
)
return self.profiler.run(
behavior,
request_or_iterator,
servicer_context,
{
"grpc_service_name": grpc_service_name,
"grpc_method_name": grpc_method_name,
},
)
return handler_factory(
behavior=_intercept,
request_deserializer=handler.request_deserializer,
response_serializer=handler.response_serializer,
)
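# --- Illustrative usage sketch (not part of the original module): one way to
# --- attach the interceptor to a grpc.aio server. The port and the servicer
# --- registration hinted at in the comment are assumptions, not project API.
if __name__ == "__main__":  # pragma: no cover
    import asyncio
    async def _serve():
        server = grpc.aio.server(interceptors=[ProfileInterceptor()])
        # Register generated servicers here, e.g.
        # my_pb2_grpc.add_MyServiceServicer_to_server(MyService(), server)
        server.add_insecure_port("[::]:50051")
        await server.start()
        await server.wait_for_termination()
    asyncio.run(_serve())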
| 32.977778
| 81
| 0.644879
|
ccdba954e8bb6b6df62b2a814980b832b2610c22
| 3,604
|
py
|
Python
|
apps/hbase/src/hbase/management/commands/hbase_setup.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
apps/hbase/src/hbase/management/commands/hbase_setup.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
apps/hbase/src/hbase/management/commands/hbase_setup.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from desktop.lib.paths import get_apps_root
from useradmin.models import install_sample_user, User
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
args = '<username>'
def handle(self, *args, **options):
if args:
user = args[0]
elif options and options['user']:
user = options['user']
else:
user = install_sample_user()
api = HbaseApi(user=user)
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
| 37.936842
| 157
| 0.711432
|
3343c7294d763f6a4adfdecfa2ee71a822372855
| 13,948
|
py
|
Python
|
tests/test_toast_load.py
|
nad-dch/sotodlib
|
1a88233ea1be1a5bdebe08df057574e4cfe95298
|
[
"MIT"
] | null | null | null |
tests/test_toast_load.py
|
nad-dch/sotodlib
|
1a88233ea1be1a5bdebe08df057574e4cfe95298
|
[
"MIT"
] | null | null | null |
tests/test_toast_load.py
|
nad-dch/sotodlib
|
1a88233ea1be1a5bdebe08df057574e4cfe95298
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018-2020 Simons Observatory.
# Full license can be found in the top level "LICENSE" file.
"""Test toast data loading.
"""
from glob import glob
import sys
import os
import numpy as np
from numpy.testing import (
assert_equal, assert_array_almost_equal, assert_array_equal, assert_allclose,
)
from unittest import TestCase
from ._helpers import create_outdir
from sotodlib.sim_hardware import get_example
from sotodlib.sim_hardware import sim_telescope_detectors
# Import so3g first so that it can control the import and monkey-patching
# of spt3g. Then our import of spt3g_core will use whatever has been imported
# by so3g.
import so3g
from spt3g import core as core3g
toast_available = None
if toast_available is None:
try:
import toast
from toast.mpi import MPI
from toast.todmap import TODGround
from toast.tod import AnalyticNoise
from sotodlib.toast.export import ToastExport
from sotodlib.toast.load import load_data
toast_available = True
except ImportError:
toast_available = False
class ToastLoadTest(TestCase):
def setUp(self):
fixture_name = os.path.splitext(os.path.basename(__file__))[0]
if not toast_available:
print("toast cannot be imported- skipping unit tests", flush=True)
return
self.outdir = None
if MPI.COMM_WORLD.rank == 0:
self.outdir = create_outdir(fixture_name)
self.outdir = MPI.COMM_WORLD.bcast(self.outdir, root=0)
toastcomm = toast.Comm()
self.data = toast.Data(toastcomm)
# Focalplane
hwfull = get_example()
dets = sim_telescope_detectors(hwfull, "SAT4")
hwfull.data["detectors"] = dets
hw = hwfull.select(
match={"wafer_slot": "w42", "band": "f030", "pixel": "00[01]"})
# print(hw.data["detectors"], flush=True)
detquats = {k: v["quat"] for k, v in hw.data["detectors"].items()}
# File dump size in bytes (1MB)
self.dumpsize = 2 ** 20
# Samples per observation
self.totsamp = 10000
# Pixelization
nside = 512
self.sim_nside = nside
self.map_nside = nside
# Scan properties
self.site_lon = '-67:47:10'
self.site_lat = '-22:57:30'
self.site_alt = 5200.
self.coord = 'C'
self.azmin = 45
self.azmax = 55
self.el = 60
self.scanrate = 1.0
self.scan_accel = 0.1
self.CES_start = None
# Noise properties
self.rate = 100.0
self.NET = 1e-3 # 1 mK NET
self.epsilon = 0.0
self.fmin = 1.0e-5
self.alpha = 1.0
self.fknee = 0.05
for ob in range(3):
ftime = (self.totsamp / self.rate) * ob + 1564015655.88
tod = TODGround(
self.data.comm.comm_group,
detquats,
self.totsamp,
detranks=self.data.comm.group_size,
firsttime=ftime,
rate=self.rate,
site_lon=self.site_lon,
site_lat=self.site_lat,
site_alt=self.site_alt,
azmin=self.azmin,
azmax=self.azmax,
el=self.el,
coord=self.coord,
scanrate=self.scanrate,
scan_accel=self.scan_accel,
CES_start=self.CES_start)
# Analytic noise model
detnames = list(detquats.keys())
drate = {x: self.rate for x in detnames}
dfmin = {x: self.fmin for x in detnames}
dfknee = {x: self.fknee for x in detnames}
dalpha = {x: self.alpha for x in detnames}
dnet = {x: self.NET for x in detnames}
nse = AnalyticNoise(
rate=drate,
fmin=dfmin,
detectors=detnames,
fknee=dfknee,
alpha=dalpha,
NET=dnet
)
# Single observation
obs = dict()
obs["tod"] = tod
obs["noise"] = nse
obs["id"] = 12345
obs["intervals"] = tod.subscans
obs["site"] = "SimonsObs"
obs["telescope"] = "SAT4"
obs["site_id"] = 1
obs["telescope_id"] = 4
obs["fpradius"] = 5.0
obs["start_time"] = ftime
obs["altitude"] = self.site_alt
obs["name"] = "test_{:02}".format(ob)
# Add the observation to the dataset
self.data.obs.append(obs)
# Simulate some noise into multiple cache prefixes. This is used
# to test the export of multiple timestream flavors.
nse = toast.tod.OpSimNoise(out="signal", realization=0)
nse.exec(self.data)
nse = toast.tod.OpSimNoise(out="component1", realization=0)
nse.exec(self.data)
nse = toast.tod.OpSimNoise(out="component2", realization=0)
nse.exec(self.data)
return
def test_load(self):
if not toast_available:
return
tod = self.data.obs[0]["tod"]
# Dump to disk
prefix = "sat4"
dumper = ToastExport(
self.outdir,
prefix=prefix,
use_intervals=True,
cache_name="signal",
cache_copy=["component1", "component2"],
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize,
units=core3g.G3TimestreamUnits.Tcmb,
verbose=False,
)
dumper.exec(self.data)
# Load the data back in
checkdata = load_data(self.outdir, comm=self.data.comm, prefix=prefix)
checkdata.info(sys.stdout)
return
def test_load_split(self):
if not toast_available:
return
obs = self.data.obs[0]
tod = obs["tod"]
# Split the detectors into separate groups
dets = sorted(tod.detectors)
detgroups = {}
for idet, det in enumerate(dets):
detgroups["group{}".format(idet)] = [det]
outdir = self.outdir + "_split"
prefix = "sat3"
# Dump to disk
dumper = ToastExport(
outdir,
prefix=prefix,
use_intervals=True,
cache_name="signal",
cache_copy=["component1", "component2"],
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize,
units=core3g.G3TimestreamUnits.Tcmb,
detgroups=detgroups,
verbose=False,
)
dumper.exec(self.data)
# Count the number of resulting files
files = glob("{}/{}/sat3_group*_00000000.g3".format(outdir, obs['name']))
assert_equal(len(files), len(detgroups),
"Exported files ({}) does not match the detector "
"groups ({})".format(files, detgroups))
# Load the data back in
checkdata = load_data(outdir, comm=self.data.comm,
prefix="{}_{}".format(prefix, "group."),
all_flavors=True)
# Verify that input and output are equal
checktod = checkdata.obs[0]["tod"]
times = tod.local_times()
checktimes = checktod.local_times()
assert_allclose(checktimes, times, rtol=1e-6, err_msg="Timestamps do not agree")
cflags = ((tod.local_common_flags() & tod.TURNAROUND) != 0).astype(np.uint8)
checkcflags = checktod.local_common_flags()
assert_array_equal(checkcflags, cflags, err_msg="Common flags do not agree")
for det in dets:
sig0 = tod.local_signal(det)
sig1 = tod.local_signal(det, "component1")
sig2 = tod.local_signal(det, "component2")
checksig0 = checktod.local_signal(det)
checksig1 = checktod.local_signal(det, "component1")
checksig2 = checktod.local_signal(det, "component2")
assert_allclose(checksig0, sig0, rtol=1e-6,
err_msg="Signal0 does not agree")
assert_allclose(checksig1, sig1, rtol=1e-6,
err_msg="Signal1 does not agree")
assert_allclose(checksig2, sig2, rtol=1e-6,
err_msg="Signal2 does not agree")
flg = tod.local_flags(det)
checkflg = checktod.local_flags(det)
assert_array_equal(checkflg, flg, err_msg="Flags do not agree")
return
def test_load_compressed(self):
if not toast_available:
return
obs = self.data.obs[0]
tod = obs["tod"]
# We'll write the file with and without one detector
# to measure the change in the TOD size
dets = sorted(tod.detectors)
detgroups = {'all_but_one' : []}
        for det in dets[1:]:
detgroups["all_but_one"].append(det)
# uncompressed output
outdir = self.outdir
uncompressed_prefix = "sat3_uncompressed"
dumper = ToastExport(
outdir,
prefix=uncompressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress=False,
verbose=False,
)
dumper.exec(self.data)
dumper = ToastExport(
outdir,
prefix=uncompressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress=False,
detgroups=detgroups,
verbose=False,
)
dumper.exec(self.data)
# compressed output
compressed_prefix = "sat3_compressed"
dumper = ToastExport(
outdir,
prefix=compressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress=True,
verbose=False,
)
dumper.exec(self.data)
compressed_prefix = "sat3_compressed"
dumper = ToastExport(
outdir,
prefix=compressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress=True,
detgroups=detgroups,
verbose=False,
)
dumper.exec(self.data)
# Very high compression ratio
very_compressed_prefix = "sat3_very_compressed"
rmstarget = 2 ** 8
dumper = ToastExport(
outdir,
prefix=very_compressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress={"rmstarget" : rmstarget},
verbose=False,
)
dumper.exec(self.data)
dumper = ToastExport(
outdir,
prefix=very_compressed_prefix,
use_intervals=True,
mask_flag_common=tod.TURNAROUND,
filesize=self.dumpsize * 100,
units=core3g.G3TimestreamUnits.Tcmb,
compress={"rmstarget" : rmstarget},
detgroups=detgroups,
verbose=False,
)
dumper.exec(self.data)
# Check that the timestreams do shrink in size
sizes = {}
for prefix in ["uncompressed", "uncompressed_all_but_one",
"compressed", "compressed_all_but_one",
"very_compressed", "very_compressed_all_but_one",
]:
fnames = glob("{}/{}/sat3_{}_0*.g3"
"".format(outdir, obs["name"], prefix))
sizes[prefix] = 0
for fname in fnames:
size = os.path.getsize(fname)
sizes[prefix] += size
# These are the sizes of individual timestreams
uncompressed_size = sizes["uncompressed"] - sizes["uncompressed_all_but_one"]
compressed_size = sizes["compressed"] - sizes["compressed_all_but_one"]
very_compressed_size = sizes["very_compressed"] - sizes["very_compressed_all_but_one"]
ratio1 = compressed_size / uncompressed_size
assert ratio1 < 1
ratio2 = very_compressed_size / uncompressed_size
assert ratio2 < ratio1
# Load the data back in
checkdata = load_data(outdir, comm=self.data.comm,
prefix=compressed_prefix)
# Verify that input and output are equal
checktod = checkdata.obs[0]["tod"]
times = tod.local_times()
checktimes = checktod.local_times()
assert_allclose(checktimes, times, rtol=1e-6, err_msg="Timestamps do not agree")
cflags = ((tod.local_common_flags() & tod.TURNAROUND) != 0).astype(np.uint8)
checkcflags = checktod.local_common_flags()
assert_array_equal(checkcflags, cflags, err_msg="Common flags do not agree")
print("\nCompression ratio1 is {:.4f} (default)\n"
"".format(ratio1), flush=True)
print("\nCompression ratio2 is {:.4f} (rmstarget={})\n"
"".format(ratio2, rmstarget), flush=True)
for det in tod.detectors:
sig = tod.local_signal(det)
checksig = checktod.local_signal(det)
assert_allclose(checksig, sig, atol=1e-5, rtol=1e-3,
err_msg="Compressed signal does not agree with the input")
flg = tod.local_flags(det)
checkflg = checktod.local_flags(det)
assert_array_equal(checkflg, flg, err_msg="Flags do not agree")
return
| 32.287037
| 94
| 0.567967
|
fe20d87c94a55c020e7e37619c4c270e35f43415
| 903
|
py
|
Python
|
web3/apps/sites/management/commands/import_web2.py
|
anonymoose2/director
|
c73c0e106372031ffe84bf044d8a88137518635c
|
[
"MIT"
] | 10
|
2017-03-20T14:40:56.000Z
|
2020-04-07T17:03:42.000Z
|
web3/apps/sites/management/commands/import_web2.py
|
anonymoose2/director
|
c73c0e106372031ffe84bf044d8a88137518635c
|
[
"MIT"
] | 50
|
2017-05-23T01:26:26.000Z
|
2020-06-05T17:13:25.000Z
|
web3/apps/sites/management/commands/import_web2.py
|
anonymoose2/director
|
c73c0e106372031ffe84bf044d8a88137518635c
|
[
"MIT"
] | 9
|
2017-03-18T01:23:38.000Z
|
2020-08-17T20:50:07.000Z
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from web3.apps.sites.models import Site
from web3.apps.sites.helpers import create_site_users, make_site_dirs, create_config_files, reload_services
import shutil
import os
class Command(BaseCommand):
help = "Import websites from web2 (/web_old)"
def handle(self, **options):
mappings = {
}
for name in mappings:
s = Site(name=name, domain="{}.sites.tjhsst.edu".format(name), category="php", purpose=mappings[name])
create_site_users(s)
make_site_dirs(s)
create_config_files(s)
shutil.move("/web_old/{}/public/".format(s.name), "{}public".format(s.path))
os.system("chown -R {}:{} {}".format(s.user.username, s.group.name, s.path))
self.stdout.write("Created Site: {}".format(s))
reload_services()
| 33.444444
| 114
| 0.638981
|
b4d72d1d499a511bd353b8a3765a5240f33b5b34
| 3,702
|
py
|
Python
|
calliope/core/util/observed_dict.py
|
FraSanvit/calliope
|
9d588fa98ec8eb11065ebb43bcc90d07657b0d43
|
[
"Apache-2.0"
] | 180
|
2015-02-04T20:03:58.000Z
|
2022-03-25T05:32:03.000Z
|
calliope/core/util/observed_dict.py
|
FraSanvit/calliope
|
9d588fa98ec8eb11065ebb43bcc90d07657b0d43
|
[
"Apache-2.0"
] | 363
|
2015-11-23T16:36:05.000Z
|
2022-03-29T17:03:58.000Z
|
calliope/core/util/observed_dict.py
|
FraSanvit/calliope
|
9d588fa98ec8eb11065ebb43bcc90d07657b0d43
|
[
"Apache-2.0"
] | 73
|
2016-06-07T09:34:07.000Z
|
2022-03-22T17:28:14.000Z
|
"""
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
"""
from collections.abc import Mapping
from calliope.core.attrdict import AttrDict
class ObservedDict(dict):
"""
Dictionary subclass which updates UpdateObserverDict when there is a change
in the values assigned to keys in the dictionary.
"""
def __init__(self, initial_dict, initial_yaml_string, on_changed=None, flat=False):
if initial_yaml_string is not None:
if flat:
attr = "as_dict_flat"
else:
attr = "as_dict"
initial_dict = getattr(
AttrDict.from_yaml_string(initial_yaml_string), attr
)()
super().__init__(initial_dict)
self.on_changed = on_changed
for k, v in initial_dict.items():
if isinstance(v, dict):
super().__setitem__(k, ObservedDict(v, None, on_changed=self.notify))
self.notify()
def __setitem__(self, key, value):
if isinstance(value, dict):
value = ObservedDict(value, None, on_changed=self.notify)
super().__setitem__(key, value)
self.notify()
def update(self, other=None, **kwargs):
if other is not None:
for k, v in other.items() if isinstance(other, Mapping) else other:
self[k] = v
for k, v in kwargs.items():
self[k] = v
def notify(self, updated=None):
if self.on_changed is not None:
return self.on_changed(self)
class UpdateObserverDict(ObservedDict):
"""
Dictionary subclass which observes a dictionary and updates an attribute of
the model_data xarray dataset with a YAML string of that dictionary.
This update takes place whenever a value is updated in the dictionary.
Parameters
----------
name : str
The model_data attribute key to update.
observer : xarray Dataset (e.g. calliope model_data)
The Dataset whose attribute dictionary will contain the key `name`
and value = a YAML string of the initial_dict.
initial_dict : dict, optional, default = None
An initial dictionary to copy for observing.
One of initial_dict or initial_yaml_string must be defined.
initial_yaml_string : str, optional, default = None
A YAML string of an initial dictionary to copy for observing.
One of initial_dict or initial_yaml_string must be defined.
Returns
-------
Observed dictionary, which acts as a dictionary in every sense *except* that
on changing or adding any key:value pairs in that dictionary, the YAML string
stored at `observer.attrs[name]` will be updated to reflect the new Observed
    dictionary.
"""
def __init__(
self,
name,
observer,
initial_dict=None,
initial_yaml_string=None,
*args,
**kwargs,
):
self.observer = observer
self.name = name
check_input_args = [i is None for i in [initial_dict, initial_yaml_string]]
if all(check_input_args) or not any(check_input_args):
raise ValueError(
"must supply one, and only one, of initial_dict or initial_yaml_string"
)
super().__init__(initial_dict, initial_yaml_string, *args, **kwargs)
def notify(self, updated=None):
temp_dict = {
k: v
for k, v in self.items()
if (not isinstance(v, dict) and v is not None)
or (isinstance(v, dict) and len(v.keys()) > 0)
}
self.observer.attrs[self.name] = AttrDict(temp_dict).to_yaml()
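# --- Illustrative usage sketch (not part of the original module). Calliope
# --- normally passes an xarray Dataset as the observer; the _FakeObserver
# --- stand-in below is an assumption that only mimics the `attrs` mapping
# --- this class actually touches.
if __name__ == "__main__":  # pragma: no cover
    class _FakeObserver:
        def __init__(self):
            self.attrs = {}
    _observer = _FakeObserver()
    _run_config = UpdateObserverDict(
        name="run_config", observer=_observer, initial_dict={"mode": "plan"}
    )
    _run_config["solver"] = "cbc"  # any update re-serialises the dict ...
    print(_observer.attrs["run_config"])  # ... as a YAML string on the observer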
| 33.351351
| 87
| 0.632631
|
6bc6db85603b6a0d500d25a740dcea2150be6c9d
| 1,762
|
py
|
Python
|
salt/output/virt_query.py
|
dr4Ke/salt
|
8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b
|
[
"Apache-2.0"
] | 2
|
2017-09-17T21:10:35.000Z
|
2019-08-26T03:00:12.000Z
|
salt/output/virt_query.py
|
dr4Ke/salt
|
8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b
|
[
"Apache-2.0"
] | null | null | null |
salt/output/virt_query.py
|
dr4Ke/salt
|
8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b
|
[
"Apache-2.0"
] | 3
|
2021-02-23T08:12:48.000Z
|
2021-02-23T08:13:13.000Z
|
# -*- coding: utf-8 -*-
'''
virt.query outputter
====================
Used to display the output from the :mod:`virt.query <salt.runners.virt.query>`
runner.
'''
def output(data):
'''
Display output for the salt-run virt.query function
'''
out = ''
for id_ in data:
out += '{0}\n'.format(id_)
for vm_ in data[id_]['vm_info']:
out += ' {0}\n'.format(vm_)
vm_data = data[id_]['vm_info'][vm_]
if 'cpu' in vm_data:
out += ' CPU: {0}\n'.format(vm_data['cpu'])
if 'mem' in vm_data:
out += ' Memory: {0}\n'.format(vm_data['mem'])
if 'state' in vm_data:
out += ' State: {0}\n'.format(vm_data['state'])
if 'graphics' in vm_data:
if vm_data['graphics'].get('type', '') == 'vnc':
out += ' Graphics: vnc - {0}:{1}\n'.format(
id_,
vm_data['graphics']['port'])
if 'disks' in vm_data:
for disk, d_data in vm_data['disks'].items():
out += ' Disk - {0}:\n'.format(disk)
out += ' Size: {0}\n'.format(d_data['disk size'])
out += ' File: {0}\n'.format(d_data['file'])
out += ' File Format: {0}\n'.format(d_data['file format'])
if 'nics' in vm_data:
for mac in vm_data['nics']:
out += ' Nic - {0}:\n'.format(mac)
                    out += '      Source: {0}\n'.format(next(iter(vm_data['nics'][mac]['source'].values())))
out += ' Type: {0}\n'.format(vm_data['nics'][mac]['type'])
return out
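# --- Illustrative usage sketch (not part of the original outputter). The
# --- nested structure below is an assumed, minimal shape of the runner data.
if __name__ == '__main__':
    _sample = {
        'hypervisor1': {
            'vm_info': {
                'vm01': {'cpu': 2, 'mem': 2048, 'state': 'running'},
            },
        },
    }
    print(output(_sample))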
| 40.045455
| 129
| 0.435301
|
c2cb76add1c5cacee5863d445a8086220b0771e4
| 19,169
|
py
|
Python
|
src/web/main.py
|
supernifty/NiftyWrite
|
8bca013675c16e3095e1a661bfa9eb23f70b8542
|
[
"BSD-3-Clause"
] | null | null | null |
src/web/main.py
|
supernifty/NiftyWrite
|
8bca013675c16e3095e1a661bfa9eb23f70b8542
|
[
"BSD-3-Clause"
] | 9
|
2019-01-22T06:51:14.000Z
|
2021-12-09T00:02:21.000Z
|
src/web/main.py
|
supernifty/NiftyWrite
|
8bca013675c16e3095e1a661bfa9eb23f70b8542
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
'''
main application defines available views
'''
import base64
import datetime
import flask
import flask_mail
import io
import json
import mimetypes
import os
import urllib.parse
import zipfile
from PIL import Image
import sqlalchemy
import flask_sqlalchemy
import auth
import config
import model
import proxy
import query
app = flask.Flask(__name__, template_folder='templates')
app.wsgi_app = proxy.ReverseProxied(app.wsgi_app)
app.config.from_pyfile('config.py')
app.secret_key = 'ducks in space'
mail = flask_mail.Mail(app)
if config.AUTHENTICATE:
authenticator = auth.GoogleAuth(app)
else:
authenticator = auth.NoAuth(app)
# database
_db_session = None
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
def db():
global _db_session
if _db_session is None: # first time
engine = sqlalchemy.create_engine(app.config['SQLALCHEMY_DATABASE_URI']) # for debugging , echo=True)
# get session
session_maker = sqlalchemy.orm.scoped_session(
sqlalchemy.orm.sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
model.Base.query = session_maker.query_property()
# create and bind
model.Base.metadata.create_all(bind = engine, checkfirst=True)
#model.Base.metadata.bind = engine
# remember
_db_session = session_maker
return _db_session
@app.teardown_appcontext
def shutdown_session(exception=None):
db().remove()
@app.before_request
def make_session_permanent():
flask.session.permanent = True
app.permanent_session_lifetime = datetime.timedelta(hours=24*30) # one month
### program logic
# main summary page
@app.route("/")
def home():
if not authenticator.is_auth(flask.session):
return flask.redirect(flask.url_for('login'))
return flask.render_template('home.html')
# getters
@app.route("/get/<category>", methods=['GET'])
def get_data(category):
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
try:
# project level
if category == 'projects':
if 'message' in flask.session:
message=flask.session['message']
del flask.session['message']
return flask.jsonify(
message=message,
username=authenticator.username(flask.session),
projects=query.summary(query.projects(db(), authenticator.user_id(flask.session)).all()),
shared=query.summary(query.shared_projects(db(), authenticator.user_id(flask.session)))
)
else:
return flask.jsonify(
username=authenticator.username(flask.session),
projects=query.summary(query.projects(db(), authenticator.user_id(flask.session)).all()),
shared=query.summary(query.shared_projects(db(), authenticator.user_id(flask.session)))
)
if category == 'documents':
if flask.request.args.get('project_id') is None:
raise query.QueryException("Required parameter project_id not provided")
return flask.jsonify(username=authenticator.username(flask.session), documents=query.documents(db(), authenticator.user_id(flask.session), flask.request.args.get('project_id')))
if category == 'shares':
if flask.request.args.get('project_id') is None:
raise query.QueryException("Required parameter project_id not provided")
return flask.jsonify(status="success", username=authenticator.username(flask.session), shares=query.detail(query.shares(db(), authenticator.user_id(flask.session), flask.request.args.get('project_id'))))
# document level
if category == 'document':
if flask.request.args.get('project_id') is None:
raise query.QueryException("Required parameter project_id not provided")
if flask.request.args.get('document_id') is None:
raise query.QueryException("Required parameter document_id not provided")
document = query.document(db(), authenticator.user_id(flask.session), flask.request.args.get('project_id'), flask.request.args.get('document_id'))
return flask.jsonify(username=authenticator.username(flask.session), document=document.detail())
if category == 'folder':
if flask.request.args.get('project_id') is None:
raise query.QueryException("Required parameter project_id not provided")
if flask.request.args.get('document_id') is None:
raise query.QueryException("Required parameter document_id not provided")
folder = query.folder(db(), authenticator.user_id(flask.session), flask.request.args.get('project_id'), flask.request.args.get('document_id'))
children = query.children(db(), folder)
return flask.jsonify(username=authenticator.username(flask.session), document=folder.detail(), children=query.detail(children))
if category == 'attachment':
if flask.request.args.get('project_id') is None:
raise query.QueryException("Required parameter project_id not provided")
if flask.request.args.get('id') is None:
raise query.QueryException("Required parameter id not provided")
result = query.attachment(db(), authenticator.user_id(flask.session), flask.request.args.get('project_id'), flask.request.args.get('id'))
# post-processing
if flask.request.args.get('resize') is not None: # image resize
im = Image.open(result['filename'])
f = float(flask.request.args.get('resize'))
(width, height) = int(im.width * f), int(im.height * f)
im = im.resize((width, height))
with io.BytesIO() as output:
im.save(output, format=result['name'].split('.')[-1])
response = flask.make_response(output.getvalue())
else:
response = flask.make_response(open(result['filename'], 'rb').read())
content_type = mimetypes.MimeTypes().guess_type(result['name'])[0]
response.headers['Content-Type'] = content_type or 'application/octet-stream'
response.headers['Content-Disposition'] = 'inline; filename="{}"'.format(result["name"].replace('"', '')) # TODO encode name
return response
except query.QueryException as ex:
return flask.jsonify(status="error", message="Request failed: {}".format(ex.message))
# setters
@app.route("/set/<category>", methods=['POST'])
def set_data(category):
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
try:
if category == 'project': # new project
req = json.loads(flask.request.form['request'])
# create project
query.add_project(db(), authenticator.user_id(flask.session), req['record']['name'], req['record']['renderer'])
return flask.jsonify(status="success")
if category == 'project_d': # delete project
project_id = flask.request.form['id']
query.delete_project(db(), authenticator.user_id(flask.session), project_id)
return flask.jsonify(status="success")
if category == 'share_revoke': # revoke access
project_id = flask.request.form['project_id']
project_user_id = flask.request.form['id']
query.revoke_access(db(), authenticator.user_id(flask.session), project_id, project_user_id)
shares = query.detail(query.shares(db(), authenticator.user_id(flask.session), project_id))
return flask.jsonify(status="success", shares=shares)
if category == 'document': # add folder/document
req = json.loads(flask.request.form['request'])
document = query.add_document(db(), authenticator.user_id(flask.session), req['record']['project_id'], req['record']['document_type'], req['record']['name'], req['record']['parent_id'], req['record']['predecessor_id'])
return flask.jsonify(status="success", parent_id=document.parent_id, document_id=document.id)
if category == 'document_d': # delete document
document_id = flask.request.form['id']
project_id = flask.request.form['project_id']
parent_id = query.delete_document(db(), authenticator.user_id(flask.session), project_id, document_id)
return flask.jsonify(status="success", parent_id=parent_id)
if category == 'document_s': # save document
document_id = flask.request.form['id']
project_id = flask.request.form['project_id']
content = flask.request.form['content']
query.update_document(db(), authenticator.user_id(flask.session), project_id, document_id, content)
return flask.jsonify(status="success")
if category == 'document_u': # update document
req = json.loads(flask.request.form['request'])
query.update_document_properties(db(), authenticator.user_id(flask.session), req['record']['project_id'], req['record']['document_id'], req['record']['name'], req['record']['renderer'])
return flask.jsonify(status="success")
if category == 'document_r': # rate document
document_id = flask.request.form['id']
project_id = flask.request.form['project_id']
rating = int(flask.request.form['rating'])
query.update_document_rating(db(), authenticator.user_id(flask.session), project_id, document_id, rating)
return flask.jsonify(status="success", rating=rating)
if category == 'document_m': # move document
document_id = flask.request.form['id']
project_id = flask.request.form['project_id']
target_id = flask.request.form['target_id']
parents = query.move_document(db(), authenticator.user_id(flask.session), project_id, document_id, target_id)
return flask.jsonify(status="success", parent_id_from=parents[0], parent_id_to=parents[1])
if category == 'attachment': # add attachment
req = json.loads(flask.request.form['request'])
query.add_attachments(db(), authenticator.user_id(flask.session), req['record']['project_id'], req['record']['id'], req['record']['file'])
return flask.jsonify(status="success")
if category == 'attachment_d': # delete attachment
attachment_id = flask.request.form['id']
project_id = flask.request.form['project_id']
query.delete_attachment(db(), authenticator.user_id(flask.session), project_id, attachment_id)
return flask.jsonify(status="success")
if category == 'bulk': # upload multiple documents
req = json.loads(flask.request.form['request'])
query.add_bulk(db(), authenticator.user_id(flask.session), req['record']['project_id'], req['record']['file'])
return flask.jsonify(status="success")
except query.AccessException as ex:
return flask.jsonify(status="access", message=ex.message)
except query.QueryException as ex:
return flask.jsonify(status="error", message=ex.message)
return flask.jsonify(status="error", message="Unrecognized command {}".format(category))
# export a project
@app.route("/export/<project_id>/", methods=['GET'])
def export_project(project_id):
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
zipped_data = io.BytesIO()
zipped = zipfile.ZipFile(zipped_data, mode="w")
db_data = json.dumps(query.export_project(db(), authenticator.user_id(flask.session), project_id), cls=JSONEncoder)
zipped.writestr("db.json", db_data)
for attachment in query.attachments(db(), authenticator.user_id(flask.session), project_id):
zipped.write(attachment['filename'], attachment['id'])
zipped.close()
zipped_data.seek(0)
return flask.send_file(zipped_data, attachment_filename='enwrite.zip', as_attachment=True, mimetype='application/zip')
@app.route("/import", methods=['POST'])
def import_project():
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
req = json.loads(flask.request.form['request'])
decoded = io.BytesIO(base64.b64decode(req['record']['file'][0]["content"]))
zipped = zipfile.ZipFile(decoded) # TODO limit to 1 file
db_data = json.loads(zipped.open('db.json').read())
query.import_project(db(), authenticator.user_id(flask.session), req['record']['name'], db_data, zipped)
return flask.jsonify(status="success")
# document sharing
@app.route("/share_p", methods=['POST'])
def share_p():
'''
generates a token that can be used to grant access to a project
'''
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
# request fields: target, access, project_id, document_id
req = json.loads(flask.request.form['request'])
try:
# create a token for the given document and email the recipient
if req['record']['project_id'] is None:
raise query.QueryException("Required parameter project_id not provided")
result = query.add_token(db(), authenticator.user_id(flask.session), req['record']['project_id'], req['record']['access'], document_id=None)
# send email
if config.EMAIL:
# not yet implemented
msg = flask_mail.Message("Hello", sender="robot@supernifty.org", recipients=[req['record']['target']])
mail.send(msg)
return flask.jsonify(status="success", token=result)
except query.QueryException as ex:
return flask.jsonify(status="error", message=ex.message)
@app.route("/access/<token>/", methods=['GET'])
def access(token):
'''
use a token to accept access to a document
'''
if not authenticator.is_auth(flask.session):
target = flask.url_for('access', token=token)
login_target = flask.url_for('login', post=urllib.parse.quote_plus(target))
return flask.redirect(login_target)
try:
# apply token and redirect
result = query.apply_token(db(), authenticator.user_id(flask.session), token)
if result[0]: # token has been applied
return flask.redirect(flask.url_for('home'))
else: # token was not applied
flask.session["message"] = result[1]
return flask.redirect(flask.url_for('home'))
except query.QueryException as ex:
flask.session["message"] = ex.message
return flask.redirect(flask.url_for('home'))
# search
@app.route("/search", methods=['POST'])
def search():
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
project_id = flask.request.form['project_id']
q = flask.request.form['q']
if flask.request.form['project_id'] is None:
raise query.QueryException("Required parameter project_id not provided")
return flask.jsonify(status="success", q=q, documents=query.summary(query.search(db(), authenticator.user_id(flask.session), project_id, q)))
@app.route("/search_recent", methods=['POST'])
def search_recent():
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
project_id = flask.request.form['project_id']
if flask.request.form['project_id'] is None:
raise query.QueryException("Required parameter project_id not provided")
return flask.jsonify(status="success", q='Recently Updated', documents=query.summary(query.search_recent(db(), authenticator.user_id(flask.session), project_id)))
@app.route("/search_rated", methods=['POST'])
def search_rated():
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
project_id = flask.request.form['project_id']
if flask.request.form['project_id'] is None:
raise query.QueryException("Required parameter project_id not provided")
return flask.jsonify(status="success", q='Top Rated', documents=query.summary(query.search_rated(db(), authenticator.user_id(flask.session), project_id)))
@app.route("/render/latex", methods=['POST'])
def render():
if not authenticator.is_auth(flask.session):
return flask.jsonify(status="auth", message="User is not authenticated")
content = flask.request.form['content']
# convert latex to html TODO queue?
try:
user_id = authenticator.user_id(flask.session)
# ensure existence of user specific asset dir
root = os.path.abspath(os.path.join(config.ASSETS, user_id))
if not os.path.exists(root):
os.makedirs(root)
# write tex
open('{root}/_fragment.tex'.format(root=root, user_id=user_id), 'w').write(content)
    command = '{command} 1>"{root}/_fragment.out" 2>"{root}/_fragment.err"'.format(root=root, command=config.PANDOC.format(root=root, user_id=user_id))
    return_code = os.system(command)  # stdout/stderr are already redirected inside `command`
if return_code == 0:
result = open('{root}/_fragment.html'.format(user_id=user_id, root=root), 'r').read()
else:
result = open('{root}/_fragment.err'.format(user_id=user_id, root=root), 'r').read().replace('\n', '<br/>')
return flask.jsonify(content=result)
except Exception as ex:
return flask.jsonify(status="error", message=ex)
finally:
os.system('/bin/rm {root}/_fragment.*'.format(user_id=user_id, root=root))
### authentication logic ###
@app.route('/login', defaults={'post': None})
@app.route('/login/<post>/')
def login(post):
return authenticator.authorize(flask.session, db(), post=post)
@app.route('/logout')
def logout():
authenticator.logout(flask.session)
return flask.redirect(flask.url_for('about'))
@app.route('/about')
def about():
return flask.render_template('about.html')
# end up here after authentication
@app.route('/authorized')
def authorized():
result = authenticator.authorized(flask.session, db())
if result is None:
if flask.session.get('next') is None:
return flask.redirect(flask.url_for('home'))
else:
unquoted = urllib.parse.unquote_plus(flask.session.get('next'))
if auth.is_safe_url(unquoted):
return flask.redirect(unquoted)
else:
return result # TODO: error page
else:
return result # TODO: error page
@authenticator.google.tokengetter
def get_google_oauth_token():
return authenticator.token(flask.session)
if __name__ == '__main__':
app.run(port=app.config['PORT'])
| 44.892272
| 230
| 0.668162
|
4128cb0701a1d2fb2d96b398323856ff47e4137b
| 923
|
py
|
Python
|
sidekick/models/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | 3
|
2020-09-07T12:14:31.000Z
|
2021-11-11T11:46:43.000Z
|
sidekick/models/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | null | null | null |
sidekick/models/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | null | null | null |
from .accounting import AccountingSource # noqa: F401
from .accounting import AccountingSourceCounter # noqa: F401
from .accounting import AccountingProfile # noqa: F401
from .accounting import BandwidthProfile # noqa: F401
from .contact import ContactType, Contact, MemberContact # noqa: F401
from .networkservice import LogicalSystem, RoutingType # noqa: F401
from .networkservice import NetworkServiceType # noqa: F401
from .networkservice import NetworkService # noqa: F401
from .networkservice import NetworkServiceDevice # noqa: F401
from .networkservice import NetworkServiceL3 # noqa: F401
from .networkservice import NetworkServiceL2 # noqa: F401
from .networkservice import NetworkServiceGroup # noqa: F401
from .nic import NIC # noqa: F401
| 65.928571
| 70
| 0.666306
|
c57aaa287e955382c331c011fc04fd0864e20fd6
| 755
|
py
|
Python
|
medium/Q8_StringToInteger.py
|
Kaciras/leetcode
|
d203aecd1afe1af13a0384a9c657c8424aab322d
|
[
"MIT"
] | null | null | null |
medium/Q8_StringToInteger.py
|
Kaciras/leetcode
|
d203aecd1afe1af13a0384a9c657c8424aab322d
|
[
"MIT"
] | null | null | null |
medium/Q8_StringToInteger.py
|
Kaciras/leetcode
|
d203aecd1afe1af13a0384a9c657c8424aab322d
|
[
"MIT"
] | null | null | null |
class Solution:
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.lstrip()
if len(str) == 0:
return 0
i, r, signum = 0, 0, 1
if str[0] == "-":
i, signum = 1, -1
elif str[0] == "+":
i = 1
while i < len(str):
			v = ord(str[i]) - 48  # 48 could be written as ord("0") for readability
if not 0 <= v < 10:
				break  # str.isdigit() could be used here instead
r = r * 10 + v
i += 1
r *= signum
if r > 2 ** 31 - 1:
return 2 ** 31 - 1
if r < -2 ** 31:
return -2 ** 31
return r
if __name__ == '__main__':
print(Solution().myAtoi(" -0012a42"))
print(Solution().myAtoi("+1"))
print(Solution().myAtoi(" -42"))
print(Solution().myAtoi("4193 with words"))
print(Solution().myAtoi("words and 987"))
print(Solution().myAtoi("-91283472332"))
| 19.358974
| 44
| 0.528477
|
f3eaff6cf0f8520ae0e47532990f9c0f59601d01
| 218
|
py
|
Python
|
moengage/exceptions.py
|
Deepanshu07/moengage
|
1beb08b89411d931391dee48ebdf59f872edc6e4
|
[
"MIT"
] | 1
|
2022-01-01T17:11:37.000Z
|
2022-01-01T17:11:37.000Z
|
moengage/exceptions.py
|
Deepanshu07/moengage
|
1beb08b89411d931391dee48ebdf59f872edc6e4
|
[
"MIT"
] | null | null | null |
moengage/exceptions.py
|
Deepanshu07/moengage
|
1beb08b89411d931391dee48ebdf59f872edc6e4
|
[
"MIT"
] | null | null | null |
class MoengageAPIException(Exception):
def __init__(self, message):
super().__init__(message)
class MoengageWrapperException(Exception):
def __init__(self, message):
super().__init__(message)
| 24.222222
| 42
| 0.715596
|
0a548711bb6f746ee91a29e0c54517079b57954d
| 1,386
|
py
|
Python
|
demosys/opengl/projection.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 70
|
2017-03-31T12:01:41.000Z
|
2022-01-05T06:30:57.000Z
|
demosys/opengl/projection.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 69
|
2017-06-18T22:37:46.000Z
|
2020-01-23T04:02:22.000Z
|
demosys/opengl/projection.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 9
|
2017-05-13T21:13:02.000Z
|
2020-10-01T18:09:49.000Z
|
from pyrr import Matrix44
class Projection:
"""
Represent a projection matrix and its various properties
including tools.
"""
def __init__(self, aspect_ratio=9 / 16, fov=75, near=1, far=100):
self.aspect_ratio = aspect_ratio
self.fov = fov
self.near = near
self.far = far
self.matrix = None
self.update()
def update(self, aspect_ratio=None, fov=None, near=None, far=None):
"""
Update the internal projection matrix based on current values
or values passed in if specified.
:param aspect_ratio: New aspect ratio
:param fov: New field of view
:param near: New near value
:param far: New far value
"""
self.aspect_ratio = aspect_ratio or self.aspect_ratio
self.fov = fov or self.fov
self.near = near or self.near
self.far = far or self.far
self.matrix = Matrix44.perspective_projection(self.fov, self.aspect_ratio, self.near, self.far)
def tobytes(self):
return self.matrix.astype('f4').tobytes()
@property
def projection_constants(self):
"""
Returns the (x, y) projection constants for the current projection.
:return: x, y tuple projection constants
"""
return self.far / (self.far - self.near), (self.far * self.near) / (self.near - self.far)
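# --- Illustrative usage sketch (not part of the original module).
if __name__ == "__main__":  # pragma: no cover
    proj = Projection(aspect_ratio=16 / 9, fov=60, near=0.1, far=500)
    proj.update(fov=75)  # recalculates the internal matrix
    print(proj.projection_constants)
    print(len(proj.tobytes()))  # 16 float32 values -> 64 bytes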
| 31.5
| 103
| 0.621934
|
ec4d31aa60e95c47083cd76a6e493ca356411b63
| 597
|
py
|
Python
|
amundsenatlastypes/client.py
|
stswn/amundsen-atlas-types
|
99a650b4930ae0674636173fc3bec4b1e6880e42
|
[
"Apache-2.0"
] | 19
|
2019-11-06T01:14:09.000Z
|
2021-07-26T12:06:49.000Z
|
amundsenatlastypes/client.py
|
stswn/amundsen-atlas-types
|
99a650b4930ae0674636173fc3bec4b1e6880e42
|
[
"Apache-2.0"
] | 11
|
2019-11-05T11:37:47.000Z
|
2021-06-13T16:13:09.000Z
|
amundsenatlastypes/client.py
|
stswn/amundsen-atlas-types
|
99a650b4930ae0674636173fc3bec4b1e6880e42
|
[
"Apache-2.0"
] | 13
|
2019-08-16T10:12:44.000Z
|
2021-09-22T06:12:55.000Z
|
import os
from atlasclient.client import Atlas
class AtlasClient:
host = os.environ.get('ATLAS_HOST', 'localhost')
port = os.environ.get('ATLAS_PORT', 21000)
user = os.environ.get('ATLAS_USERNAME', 'admin')
password = os.environ.get('ATLAS_PASSWORD', 'admin')
timeout = os.environ.get('ATLAS_REQUEST_TIMEOUT', 60)
def driver(self):
return Atlas(host=self.host,
port=self.port,
username=self.user,
password=self.password,
timeout=self.timeout)
driver = AtlasClient().driver()
| 27.136364
| 57
| 0.60804
|
11ec8705cc0f0824475d2fbdf4171fa509aa8738
| 409
|
py
|
Python
|
expenses_report/visualizations/i_visualization.py
|
kircher-sw/expenses-tracker
|
afd9550616a79f54dd119d91cec209c7748e9689
|
[
"BSD-3-Clause"
] | 2
|
2019-07-24T16:01:12.000Z
|
2021-07-21T01:51:33.000Z
|
expenses_report/visualizations/i_visualization.py
|
kircher-sw/expenses-tracker
|
afd9550616a79f54dd119d91cec209c7748e9689
|
[
"BSD-3-Clause"
] | null | null | null |
expenses_report/visualizations/i_visualization.py
|
kircher-sw/expenses-tracker
|
afd9550616a79f54dd119d91cec209c7748e9689
|
[
"BSD-3-Clause"
] | null | null | null |
import plotly.graph_objects as go
from expenses_report.preprocessing.data_provider import DataProvider
class IVisualization:
    """Common interface implemented by every report visualization."""
    def prepare_data(self, data: DataProvider):
        """Extract and aggregate the values this visualization needs."""
        pass
    def build_visualization(self) -> go.Figure:
        """Assemble and return the plotly figure from the prepared data."""
        pass
def build(self, data: DataProvider):
self.prepare_data(data)
return self.build_visualization()
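# --- Illustrative sketch (not part of the original module): a minimal concrete
# --- visualization built against the interface above. The categories and
# --- totals are hard-coded assumptions, not values from the real DataProvider.
class _ExampleTotalsVisualization(IVisualization):
    """Plots one bar per hard-coded category; kept deliberately minimal."""
    def prepare_data(self, data: DataProvider):
        # A real implementation would aggregate values from the DataProvider;
        # here we only keep a reference so build_visualization() could use it.
        self._data = data
    def build_visualization(self) -> go.Figure:
        return go.Figure(go.Bar(x=["groceries", "rent"], y=[250, 900]))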
| 20.45
| 68
| 0.647922
|
29220550568510ab527ca675a6ceac4b61e88808
| 7,683
|
py
|
Python
|
python/http_client/v1/polyaxon_sdk/models/v1_grid_search.py
|
polyaxon/client
|
1beaf78b0aa9fabc37b42a253d5e29c0623d8b16
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_grid_search.py
|
polyaxon/client
|
1beaf78b0aa9fabc37b42a253d5e29c0623d8b16
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_grid_search.py
|
polyaxon/client
|
1beaf78b0aa9fabc37b42a253d5e29c0623d8b16
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.18.0
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1GridSearch(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kind': 'str',
'params': 'dict(str, object)',
'num_runs': 'int',
'seed': 'int',
'concurrency': 'int',
'early_stopping': 'list[object]'
}
attribute_map = {
'kind': 'kind',
'params': 'params',
'num_runs': 'numRuns',
'seed': 'seed',
'concurrency': 'concurrency',
'early_stopping': 'earlyStopping'
}
def __init__(self, kind='grid', params=None, num_runs=None, seed=None, concurrency=None, early_stopping=None, local_vars_configuration=None): # noqa: E501
"""V1GridSearch - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._kind = None
self._params = None
self._num_runs = None
self._seed = None
self._concurrency = None
self._early_stopping = None
self.discriminator = None
if kind is not None:
self.kind = kind
if params is not None:
self.params = params
if num_runs is not None:
self.num_runs = num_runs
if seed is not None:
self.seed = seed
if concurrency is not None:
self.concurrency = concurrency
if early_stopping is not None:
self.early_stopping = early_stopping
@property
def kind(self):
"""Gets the kind of this V1GridSearch. # noqa: E501
:return: The kind of this V1GridSearch. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1GridSearch.
:param kind: The kind of this V1GridSearch. # noqa: E501
:type kind: str
"""
self._kind = kind
@property
def params(self):
"""Gets the params of this V1GridSearch. # noqa: E501
:return: The params of this V1GridSearch. # noqa: E501
:rtype: dict(str, object)
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1GridSearch.
:param params: The params of this V1GridSearch. # noqa: E501
:type params: dict(str, object)
"""
self._params = params
@property
def num_runs(self):
"""Gets the num_runs of this V1GridSearch. # noqa: E501
:return: The num_runs of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._num_runs
@num_runs.setter
def num_runs(self, num_runs):
"""Sets the num_runs of this V1GridSearch.
:param num_runs: The num_runs of this V1GridSearch. # noqa: E501
:type num_runs: int
"""
self._num_runs = num_runs
@property
def seed(self):
"""Gets the seed of this V1GridSearch. # noqa: E501
:return: The seed of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._seed
@seed.setter
def seed(self, seed):
"""Sets the seed of this V1GridSearch.
:param seed: The seed of this V1GridSearch. # noqa: E501
:type seed: int
"""
self._seed = seed
@property
def concurrency(self):
"""Gets the concurrency of this V1GridSearch. # noqa: E501
:return: The concurrency of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._concurrency
@concurrency.setter
def concurrency(self, concurrency):
"""Sets the concurrency of this V1GridSearch.
:param concurrency: The concurrency of this V1GridSearch. # noqa: E501
:type concurrency: int
"""
self._concurrency = concurrency
@property
def early_stopping(self):
"""Gets the early_stopping of this V1GridSearch. # noqa: E501
:return: The early_stopping of this V1GridSearch. # noqa: E501
:rtype: list[object]
"""
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
"""Sets the early_stopping of this V1GridSearch.
:param early_stopping: The early_stopping of this V1GridSearch. # noqa: E501
:type early_stopping: list[object]
"""
self._early_stopping = early_stopping
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GridSearch):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GridSearch):
return True
return self.to_dict() != other.to_dict()
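# --- Illustrative usage sketch (not part of the generated model). The params
# --- payload is an arbitrary example; the model only requires dict(str, object).
if __name__ == "__main__":  # pragma: no cover
    search = V1GridSearch(
        params={"learning_rate": [0.1, 0.01]},
        num_runs=4,
        concurrency=2,
    )
    # serialize=True maps attribute names to their JSON keys (e.g. numRuns).
    print(search.to_dict(serialize=True))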
| 27.537634
| 159
| 0.591566
|
14fcc7b7d83a9e95f73604f895712296ebcf1317
| 588
|
py
|
Python
|
tests/test_docs.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | 8
|
2022-02-14T20:20:30.000Z
|
2022-03-08T10:03:13.000Z
|
tests/test_docs.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | 1
|
2022-02-20T08:40:42.000Z
|
2022-02-20T15:02:58.000Z
|
tests/test_docs.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | null | null | null |
import pytest
from mktestdocs import check_docstring, check_md_file
from icepickle.pipeline import (
PartialPipeline,
PartialFeatureUnion,
make_partial_pipeline,
make_partial_union,
)
components = [
PartialPipeline,
PartialFeatureUnion,
make_partial_pipeline,
make_partial_union,
]
@pytest.mark.parametrize("obj", components, ids=lambda d: d.__qualname__)
def test_member(obj):
"""The example snippets must run."""
check_docstring(obj)
def test_readme_works():
"""The code-blocks must run."""
check_md_file("README.md", memory=True)
| 20.275862
| 73
| 0.732993
|
9b409f1804c5fc1cb2ebf8681820456ccd4d543d
| 1,916
|
py
|
Python
|
CS224d_assignment1/q1_softmax.py
|
Darktex/CS224d
|
ea31492e4be8563189aed44cdde7bb66eb424a68
|
[
"MIT"
] | 1
|
2017-05-01T10:07:02.000Z
|
2017-05-01T10:07:02.000Z
|
CS224d_assignment1/q1_softmax.py
|
Darktex/CS224d
|
ea31492e4be8563189aed44cdde7bb66eb424a68
|
[
"MIT"
] | null | null | null |
CS224d_assignment1/q1_softmax.py
|
Darktex/CS224d
|
ea31492e4be8563189aed44cdde7bb66eb424a68
|
[
"MIT"
] | 1
|
2018-09-29T10:12:00.000Z
|
2018-09-29T10:12:00.000Z
|
import numpy as np
import random
def softmax(x):
"""
Compute the softmax function for each row of the input x.
It is crucial that this function is optimized for speed because
it will be used frequently in later code.
You might find numpy functions np.exp, np.sum, np.reshape,
np.max, and numpy broadcasting useful for this task. (numpy
broadcasting documentation:
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
You should also make sure that your code works for one
dimensional inputs (treat the vector as a row), you might find
it helpful for your later problems.
You must implement the optimization in problem 1(a) of the
written assignment!
"""
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
return x
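# --- Reference sketch added for illustration only; it is NOT the graded
# --- solution for the stub above. It demonstrates the max-subtraction trick
# --- from problem 1(a) that keeps exp() from overflowing.
def _softmax_reference(x):
    x = np.asarray(x, dtype=float)
    if x.ndim == 1:
        e = np.exp(x - np.max(x))                     # shift by the max entry
        return e / np.sum(e)
    e = np.exp(x - np.max(x, axis=1, keepdims=True))  # shift each row by its max
    return e / np.sum(e, axis=1, keepdims=True)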
def test_softmax_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
test1 = softmax(np.array([1,2]))
print test1
assert np.amax(np.fabs(test1 - np.array(
[0.26894142, 0.73105858]))) <= 1e-6
test2 = softmax(np.array([[1001,1002],[3,4]]))
print test2
assert np.amax(np.fabs(test2 - np.array(
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]))) <= 1e-6
test3 = softmax(np.array([[-1001,-1002]]))
print test3
assert np.amax(np.fabs(test3 - np.array(
[0.73105858, 0.26894142]))) <= 1e-6
print "You should verify these results!\n"
def test_softmax():
"""
Use this space to test your softmax implementation by running:
python q1_softmax.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_softmax_basic()
test_softmax()
| 29.030303
| 71
| 0.655532
|
e849e16d51847a346670d07af2621cc1070f5659
| 28,879
|
py
|
Python
|
Lib/distutils/msvc9compiler.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/distutils/msvc9compiler.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/distutils/msvc9compiler.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id$"
import os
import subprocess
import sys
import re
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_preprocess_options, \
gen_lib_options
from distutils import log
from distutils.util import get_platform
import winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (e.g., 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64).
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
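# Hedged worked example (an illustrative helper, not part of distutils): for a
# Python built with VS 2008 the banner contains "MSC v.1500", so the parse
# above gives s == "1500", majorVersion == 15 - 6 == 9 and
# minorVersion == 0 / 10.0, i.e. a build version of 9.0.
def _example_parse_msc_banner(banner="MSC v.1500 64 bit (AMD64)"):
    prefix = "MSC v."
    s = banner[banner.find(prefix) + len(prefix):].split(" ", 1)[0]
    return (int(s[:-2]) - 6) + int(s[2:3]) / 10.0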
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
    It first tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
log.debug("Unable to find productdir in registry")
productdir = None
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
self.__paths = vc_env['path'].split(os.pathsep)
os.environ['lib'] = vc_env['lib']
os.environ['include'] = vc_env['include']
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
# Embedded manifests are recommended - see MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can embed it later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
if target_desc == CCompiler.EXECUTABLE:
mfid = 1
else:
mfid = 2
# Remove references to the Visual C runtime
self._remove_visual_c_ref(temp_manifest)
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
temp_manifest, out_arg])
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
| 38.250331
| 99
| 0.55639
|
6ab637da8fc90bedf5a46481d9d6a270c4c01895
| 21,957
|
py
|
Python
|
qa/rpc-tests/replace-by-fee.py
|
infertux/bitcoinclassic
|
e744f27b6c6375749ce81aa395fb82445b40af49
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/replace-by-fee.py
|
infertux/bitcoinclassic
|
e744f27b6c6375749ce81aa395fb82445b40af49
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/replace-by-fee.py
|
infertux/bitcoinclassic
|
e744f27b6c6375749ce81aa395fb82445b40af49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
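# Hedged helper sketch (an assumed convenience function, not used by the tests
# below): under the opt-in replace-by-fee rules exercised here, a transaction
# signals replaceability when at least one of its inputs has nSequence below
# 0xfffffffe, which is the property test_opt_in toggles via the nSequence
# values it sets on each CTxIn.
def signals_replaceability(tx):
    return any(txin.nSequence < 0xfffffffe for txin in tx.vin)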
class ReplaceByFeeTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
"-mempoolreplacement=1"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print "Running test simple doublespend..."
self.test_simple_doublespend()
print "Running test doublespend chain..."
self.test_doublespend_chain()
print "Running test doublespend tree..."
self.test_doublespend_tree()
print "Running test replacement feeperkb..."
self.test_replacement_feeperkb()
print "Running test spends of conflicting outputs..."
self.test_spends_of_conflicting_outputs()
print "Running test new unconfirmed inputs..."
self.test_new_unconfirmed_inputs()
print "Running test too many replacements..."
self.test_too_many_replacements()
print "Running test opt-in..."
self.test_opt_in()
print "Running test prioritised transactions..."
self.test_prioritised_transactions()
print "Passed\n"
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
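        # (Added arithmetic note: the loop above creates 40 children paying
        # 1 BTC fee each, while this replacement keeps a 20 BTC output from a
        # 50 BTC input - a 30 BTC fee - which is below the 40 BTC the evicted
        # chain already pays.)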
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
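        # (Added arithmetic note: tx1a pays roughly 0.1 BTC on a tiny
        # transaction, while tx1b pays about 1.099 BTC spread over a roughly
        # 999 kB transaction, so tx1b's fee per KB is far lower despite the
        # larger absolute fee.)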
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print tx1b_txid
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
        # Now create a new transaction that spends from tx1a and tx2a,
        # opting in on only one of the inputs.
        # The transaction should be replaceable on either input.
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
| 37.405451
| 105
| 0.5999
|
4ba4ada7a8c4e582c2de1d2c8a1305b8826c5d4a
| 32,422
|
py
|
Python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | 1
|
2020-03-06T05:57:16.000Z
|
2020-03-06T05:57:16.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import map_error
from ... import models
class AppendBlobOperations:
"""AppendBlobOperations async operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
:ivar comp: . Constant value: "appendblock".
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
self.x_ms_blob_type = "AppendBlob"
self.comp = "appendblock"
async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
"""The Create Append Blob operation creates a new append blob.
:param content_length: The length of the request.
:type content_length: long
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param metadata: Optional. Specifies a user-defined name-value pair
associated with the blob. If no name-value pairs are specified, the
operation will copy the metadata from the source blob or file to the
destination blob. If one or more name-value pairs are specified, the
destination blob is created with the specified metadata, and metadata
is not copied from the source blob or file. Note that beginning with
version 2009-09-19, metadata names must adhere to the naming rules for
C# identifiers. See Naming and Referencing Containers, Blobs, and
Metadata for more information.
:type metadata: str
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param blob_http_headers: Additional parameters for the operation
:type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Additional parameters for the operation
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
blob_content_type = None
if blob_http_headers is not None:
blob_content_type = blob_http_headers.blob_content_type
blob_content_encoding = None
if blob_http_headers is not None:
blob_content_encoding = blob_http_headers.blob_content_encoding
blob_content_language = None
if blob_http_headers is not None:
blob_content_language = blob_http_headers.blob_content_language
blob_content_md5 = None
if blob_http_headers is not None:
blob_content_md5 = blob_http_headers.blob_content_md5
blob_cache_control = None
if blob_http_headers is not None:
blob_cache_control = blob_http_headers.blob_cache_control
blob_content_disposition = None
if blob_http_headers is not None:
blob_content_disposition = blob_http_headers.blob_content_disposition
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
encryption_key = None
if cpk_info is not None:
encryption_key = cpk_info.encryption_key
encryption_key_sha256 = None
if cpk_info is not None:
encryption_key_sha256 = cpk_info.encryption_key_sha256
encryption_algorithm = None
if cpk_info is not None:
encryption_algorithm = cpk_info.encryption_algorithm
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {}
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
if blob_content_type is not None:
header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
if blob_content_encoding is not None:
header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
if blob_content_language is not None:
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
if blob_content_md5 is not None:
header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
if blob_cache_control is not None:
header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
if blob_content_disposition is not None:
header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if encryption_key is not None:
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
if encryption_key_sha256 is not None:
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
if encryption_algorithm is not None:
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
create.metadata = {'url': '/{containerName}/{blob}'}
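    # Hedged usage sketch (assumed client wiring, not generated code): once this
    # operations class is attached to a service client, a new append blob is
    # typically created with a zero-length create() call and then grown with
    # append_block(), e.g.
    #
    #     await client.append_blob.create(content_length=0, timeout=30)
    #     await client.append_blob.append_block(body=chunks, content_length=total_length)
    #
    # where ``client.append_blob``, ``chunks`` and ``total_length`` are
    # illustrative assumptions only.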
async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
"""The Append Block operation commits a new block of data to the end of an
existing append blob. The Append Block operation is permitted only if
the blob was created with x-ms-blob-type set to AppendBlob. Append
        Block is supported only on version 2015-02-21 or later.
:param body: Initial data
:type body: Generator
:param content_length: The length of the request.
:type content_length: long
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param transactional_content_md5: Specify the transactional md5 for
the body, to be validated by the service.
:type transactional_content_md5: bytearray
:param transactional_content_crc64: Specify the transactional crc64
for the body, to be validated by the service.
:type transactional_content_crc64: bytearray
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param append_position_access_conditions: Additional parameters for
the operation
:type append_position_access_conditions:
~azure.storage.blob.models.AppendPositionAccessConditions
:param cpk_info: Additional parameters for the operation
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
max_size = None
if append_position_access_conditions is not None:
max_size = append_position_access_conditions.max_size
append_position = None
if append_position_access_conditions is not None:
append_position = append_position_access_conditions.append_position
encryption_key = None
if cpk_info is not None:
encryption_key = cpk_info.encryption_key
encryption_key_sha256 = None
if cpk_info is not None:
encryption_key_sha256 = cpk_info.encryption_key_sha256
encryption_algorithm = None
if cpk_info is not None:
encryption_algorithm = cpk_info.encryption_algorithm
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
# Construct URL
url = self.append_block.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
if transactional_content_md5 is not None:
header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
if transactional_content_crc64 is not None:
header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if max_size is not None:
header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
if append_position is not None:
header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
if encryption_key is not None:
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
if encryption_key_sha256 is not None:
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
if encryption_algorithm is not None:
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
# Construct body
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
append_block.metadata = {'url': '/{containerName}/{blob}'}
async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
"""The Append Block operation commits a new block of data to the end of an
existing append blob where the contents are read from a source url. The
Append Block operation is permitted only if the blob was created with
x-ms-blob-type set to AppendBlob. Append Block is supported only on
version 2015-02-21 or later.
:param source_url: Specify a URL to the copy source.
:type source_url: str
:param content_length: The length of the request.
:type content_length: long
:param source_range: Bytes of source data in the specified range.
:type source_range: str
:param source_content_md5: Specify the md5 calculated for the range of
bytes that must be read from the copy source.
:type source_content_md5: bytearray
:param source_contentcrc64: Specify the crc64 calculated for the range
of bytes that must be read from the copy source.
:type source_contentcrc64: bytearray
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param transactional_content_md5: Specify the transactional md5 for
the body, to be validated by the service.
:type transactional_content_md5: bytearray
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param cpk_info: Additional parameters for the operation
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param append_position_access_conditions: Additional parameters for
the operation
:type append_position_access_conditions:
~azure.storage.blob.models.AppendPositionAccessConditions
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param source_modified_access_conditions: Additional parameters for
the operation
:type source_modified_access_conditions:
~azure.storage.blob.models.SourceModifiedAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
encryption_key = None
if cpk_info is not None:
encryption_key = cpk_info.encryption_key
encryption_key_sha256 = None
if cpk_info is not None:
encryption_key_sha256 = cpk_info.encryption_key_sha256
encryption_algorithm = None
if cpk_info is not None:
encryption_algorithm = cpk_info.encryption_algorithm
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
max_size = None
if append_position_access_conditions is not None:
max_size = append_position_access_conditions.max_size
append_position = None
if append_position_access_conditions is not None:
append_position = append_position_access_conditions.append_position
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
source_if_modified_since = None
if source_modified_access_conditions is not None:
source_if_modified_since = source_modified_access_conditions.source_if_modified_since
source_if_unmodified_since = None
if source_modified_access_conditions is not None:
source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
source_if_match = None
if source_modified_access_conditions is not None:
source_if_match = source_modified_access_conditions.source_if_match
source_if_none_match = None
if source_modified_access_conditions is not None:
source_if_none_match = source_modified_access_conditions.source_if_none_match
# Construct URL
url = self.append_block_from_url.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
# Construct headers
header_parameters = {}
header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
if source_range is not None:
header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
if source_content_md5 is not None:
header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
if source_contentcrc64 is not None:
header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
if transactional_content_md5 is not None:
header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if encryption_key is not None:
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
if encryption_key_sha256 is not None:
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
if encryption_algorithm is not None:
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if max_size is not None:
header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
if append_position is not None:
header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if source_if_modified_since is not None:
header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
if source_if_unmodified_since is not None:
header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
if source_if_match is not None:
header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
if source_if_none_match is not None:
header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
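# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the generated code above).
# It assumes the two coroutines above live on the generated async append-blob
# operations object that azure.storage.blob wires up internally; the variable
# name `append_blob_ops` and the sample URLs are placeholders.
# ---------------------------------------------------------------------------
async def _example_append(append_blob_ops):
    # Commit an 11-byte block supplied directly in the request body
    # (maps to append_block above).
    data = b"hello world"
    await append_blob_ops.append_block(body=data, content_length=len(data))
    # Commit a block copied from another blob by URL
    # (maps to append_block_from_url, whose signature is shown above).
    await append_blob_ops.append_block_from_url(
        source_url="https://account.blob.core.windows.net/src-container/src-blob",
        content_length=11,
        source_range="bytes=0-10")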
| 60.152134 | 384 | 0.690426 |
74c9a12db05d3ffa65f01dcef3dc71bc26e24d14 | 5,439 | py | Python | sanaviron/src/3rd/pycha/tests/pie.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | ["Apache-2.0"] | null | null | null | sanaviron/src/3rd/pycha/tests/pie.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | ["Apache-2.0"] | null | null | null | sanaviron/src/3rd/pycha/tests/pie.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2007-2008 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import math
import unittest
import cairo
import pycha.pie
class SliceTests(unittest.TestCase):
def test_init(self):
slice = pycha.pie.Slice('test', 3 / 5.0, 0, 4, 1 / 4.0)
self.assertEqual(slice.name, 'test')
self.assertEqual(slice.fraction, 3 / 5.0)
self.assertEqual(slice.xval, 0)
self.assertEqual(slice.yval, 4)
self.assertEqual(slice.startAngle, math.pi / 2)
self.assertEqual(slice.endAngle, 1.7 * math.pi)
def test_isBigEnough(self):
slice = pycha.pie.Slice('test 1', 3 / 5.0, 0, 4, 1 / 4.0)
self.assertEqual(slice.isBigEnough(), True)
slice = pycha.pie.Slice('test 2', 1 / 10000.0, 0, 4, 1 / 4.0)
self.assertEqual(slice.isBigEnough(), False)
def test_normalisedAngle(self):
# First quadrant
slice = pycha.pie.Slice('test 1', 1 / 6.0, 0, 4, 0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 1 / 6.0 * math.pi, 4)
# Second quadrant
slice = pycha.pie.Slice('test 1', 1 / 6.0, 0, 4, 1 / 4.0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 2 / 3.0 * math.pi, 4)
# Third quadrant
slice = pycha.pie.Slice('test 1', 1 / 6.0, 0, 4, 1 / 2.0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 7 / 6.0 * math.pi, 4)
# Fourth quadrant
slice = pycha.pie.Slice('test 1', 1 / 6.0, 0, 4, 3 / 4.0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 10 / 6.0 * math.pi, 4)
# Bigger than a circle
slice = pycha.pie.Slice('test 1', 2 / 3.0, 0, 4, 3 / 4.0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 1 / 6.0 * math.pi, 4)
# Negative angle
slice = pycha.pie.Slice('test 1', -1 / 6.0, 0, 4, 0)
self.assertAlmostEqual(slice.getNormalisedAngle(), 11 / 6.0 * math.pi, 4)
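# Hedged sketch restating the arithmetic the assertions above imply:
# Slice(name, fraction, xval, yval, angle) spans startAngle = 2*pi*angle to
# endAngle = startAngle + 2*pi*fraction, and getNormalisedAngle() returns the
# mid-angle wrapped into [0, 2*pi).  The helper below is illustration only and
# is not part of pycha.
def _expected_slice_angles(fraction, angle):
    start = 2 * math.pi * angle
    end = start + 2 * math.pi * fraction
    normalised_mid = ((start + end) / 2.0) % (2 * math.pi)
    return start, end, normalised_mid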
class PieTests(unittest.TestCase):
def test_init(self):
ch = pycha.pie.PieChart(None)
self.assertEqual(ch.slices, [])
self.assertEqual(ch.centerx, 0)
self.assertEqual(ch.centery, 0)
self.assertEqual(ch.radius, 0)
def test_updateChart(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
dataset = (
('dataset1', ([0, 10],)),
('dataset2', ([0, 20],)),
('dataset3', ([0, 70],)),
)
opt = {'padding': {'left': 0, 'right': 0, 'top': 0, 'bottom': 0},
'pieRadius': 0.5}
ch = pycha.pie.PieChart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
ch._updateChart()
self.assertEqual(ch.centerx, 250)
self.assertEqual(ch.centery, 250)
self.assertEqual(ch.radius, 250)
slices = (
pycha.pie.Slice('dataset1', 0.1, 0, 10, 0),
pycha.pie.Slice('dataset2', 0.2, 1, 20, 0.1),
pycha.pie.Slice('dataset3', 0.7, 2, 70, 0.3),
)
for i, slice in enumerate(slices):
s1, s2 = ch.slices[i], slice
self.assertEqual(s1.name, s2.name)
self.assertAlmostEqual(s1.fraction, s2.fraction, 4)
self.assertAlmostEqual(s1.startAngle, s2.startAngle, 4)
self.assertAlmostEqual(s1.endAngle, s2.endAngle, 4)
self.assertEqual(s1.xval, s2.xval)
self.assertEqual(s1.yval, s2.yval)
def test_updateTicks(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
dataset = (
('dataset1', ([0, 10],)),
('dataset2', ([0, 20],)),
('dataset3', ([0, 70],)),
)
opt = {'padding': {'left': 0, 'right': 0, 'top': 0, 'bottom': 0},
'pieRadius': 0.5}
ch = pycha.pie.PieChart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
ch._updateChart()
ch._updateTicks()
self.assertEqual(ch.xticks, [(0, 'dataset1 (10.0%)'),
(1, 'dataset2 (20.0%)'),
(2, 'dataset3 (70.0%)')])
ticks = [{'v': 0, 'label': 'First dataset'},
{'v': 1, 'label': 'Second dataset'},
{'v': 2, 'label': 'Third dataset'}]
opt = {'axis': {'x': {'ticks': ticks}, }, }
ch = pycha.pie.PieChart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
ch._updateChart()
ch._updateTicks()
self.assertEqual(ch.xticks, [(0, 'First dataset (10.0%)'),
(1, 'Second dataset (20.0%)'),
(2, 'Third dataset (70.0%)')])
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(SliceTests),
unittest.makeSuite(PieTests),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 36.75 | 81 | 0.578415 |
1d9d88b663109da891d5283e0a8aedbe27592fcb | 4,333 | py | Python | notebooks/src/combined_nn.py | arunparayatham/voxceleb_enrichment_age_gender | f13c06f852178dbd543caedd53a9909052a5fdbc | ["MIT"] | 12 | 2021-04-06T13:45:40.000Z | 2022-03-24T18:03:03.000Z | notebooks/src/combined_nn.py | arunparayatham/voxceleb_enrichment_age_gender | f13c06f852178dbd543caedd53a9909052a5fdbc | ["MIT"] | 1 | 2021-10-05T10:18:22.000Z | 2022-03-29T12:24:49.000Z | notebooks/src/combined_nn.py | arunparayatham/voxceleb_enrichment_age_gender | f13c06f852178dbd543caedd53a9909052a5fdbc | ["MIT"] | 4 | 2021-04-02T06:20:00.000Z | 2022-03-15T05:37:21.000Z |
from tensorflow.keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, BatchNormalization, Dense, Dropout, Input, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D, AveragePooling1D
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.applications import DenseNet121,ResNet50, ResNet101V2
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import l2, l1
from tensorflow.python.keras import backend as K
import sys
sys.path.insert(1, '.')
from src.utils_models import compile_model
from tensorflow import Tensor
from tensorflow.keras.layers import Input, Conv2D, ReLU, BatchNormalization,\
Add, AveragePooling2D, Flatten, Dense, LeakyReLU, ELU
from tensorflow.keras.models import Model
import tensorflow
def lstm_cnn(input_shape, config):
model = Sequential()
model.add(LSTM(
config['lstm_cells'],
return_sequences=True,
input_shape=input_shape,
kernel_initializer=config['kernel_initializer']
))
model.add(LSTM(
config['lstm_cells'],
return_sequences=True,
kernel_initializer=config['kernel_initializer']
))
model.add(Conv1D(config['filter_n'],
kernel_size=config['kernel_size'],
activation='relu',
padding='same',
strides=config['strides'],
kernel_initializer=config['kernel_initializer'],
kernel_regularizer=l2(config['l_reg'])
))
if config['batch_norm_everywhere']:
model.add(BatchNormalization())
model.add(Conv1D(2 * config['filter_n'],
kernel_size=config['kernel_size'],
activation='relu',
padding='same',
strides=config['strides'],
kernel_initializer=config['kernel_initializer'],
kernel_regularizer=l2(config['l_reg'])
))
if config['batch_norm_everywhere']:
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=(config['pool_size'][0])))
model.add(Flatten())
model.add(Dense(
config['dense_n'],
kernel_initializer=config['kernel_initializer'],
activation="relu"
))
model.add(Dense(
1,
kernel_initializer=config['kernel_initializer']
))
compile_model(model, config)
model.summary()
return model
def cnn_lstm(input_shape, config):
model = Sequential()
model.add(Conv1D(config['filter_n'],
kernel_size=config['kernel_size'],
activation='relu',
padding='same',
strides=config['strides'],
kernel_initializer=config['kernel_initializer'],
kernel_regularizer=l2(config['l_reg']),
input_shape=input_shape
))
if config['batch_norm_everywhere']:
model.add(BatchNormalization())
model.add(Conv1D(2 * config['filter_n'],
kernel_size=config['kernel_size'],
activation='relu',
padding='same',
strides=config['strides'],
kernel_initializer=config['kernel_initializer'],
kernel_regularizer=l2(config['l_reg'])
))
if config['batch_norm_everywhere']:
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=(config['pool_size'][0])))
model.add(LSTM(
config['lstm_cells'],
return_sequences=True,
input_shape=input_shape,
kernel_initializer=config['kernel_initializer']
))
model.add(LSTM(
config['lstm_cells'],
return_sequences=False,
kernel_initializer=config['kernel_initializer']
))
model.add(Flatten())
model.add(Dense(
config['dense_n'],
kernel_initializer=config['kernel_initializer'],
activation="relu"
))
model.add(Dense(
1,
kernel_initializer=config['kernel_initializer']
))
compile_model(model, config)
model.summary()
return model
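# Hedged usage sketch.  The keys below are the ones read directly by
# lstm_cnn()/cnn_lstm() above; compile_model() (imported from src.utils_models,
# not shown here) will expect additional optimizer-related keys, so this config
# is an illustrative assumption rather than a complete one.
def _example_build():
    example_config = {
        'lstm_cells': 64,
        'filter_n': 32,
        'kernel_size': 3,
        'strides': 1,
        'pool_size': (2,),
        'dense_n': 64,
        'l_reg': 1e-4,
        'kernel_initializer': 'glorot_uniform',
        'batch_norm_everywhere': True,
    }
    # e.g. 300 time frames x 40 spectral features per utterance (assumed shape)
    return cnn_lstm(input_shape=(300, 40), config=example_config)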
| 36.108333 | 202 | 0.60697 |
3e62e8ab90995c63c9b6014302f136774284ce7a | 600 | py | Python | math/pascal.py | jcarreiro/jmc-python | 979c67ca489d4e80e6be96d23f8bcecacabbee59 | ["MIT"] | null | null | null | math/pascal.py | jcarreiro/jmc-python | 979c67ca489d4e80e6be96d23f8bcecacabbee59 | ["MIT"] | null | null | null | math/pascal.py | jcarreiro/jmc-python | 979c67ca489d4e80e6be96d23f8bcecacabbee59 | ["MIT"] | null | null | null |
# Generates Pascal's Triangle
#
# 1
# 1 1
# 1 2 1
# 1 3 3 1
#
# etc...
from numeric import *
# This is an iterative solution.
def pascals_triangle(n):
for r in range(0, n):
for c in range(0, r+1):
print(n_choose_k(r, c), end=' ')
print()
# This is a tree recursive solution.
# Each node t_r,c = t_r-1,c-1 + t_r-1,c.
def t(r, c):
if c == 0 or c == r:
return 1
return t(r - 1, c - 1) + t(r - 1, c)
def pascals_triangle_tree(n):
for r in range(0, n):
for c in range(0, r+1):
print(t(r, c), end=' ')
print()
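# Hedged sketch: n_choose_k is imported from the repo's own `numeric` module,
# which is not shown here.  The reference implementation below (the usual
# binomial coefficient C(n, k) = n! / (k! * (n - k)!)) is an assumption about
# what it computes, named differently so it does not shadow the real import.
from math import factorial

def _n_choose_k_reference(n, k):
    return factorial(n) // (factorial(k) * factorial(n - k))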
| 19.354839 | 40 | 0.5 |
1fd13242cf2b9251931fdaeaae321df99d3af874 | 1,274 | py | Python | tasks/task5/5_3.py | eweilow/si1336-simulation-and-modeling | 98a8f4e8067a82584e8d6e4aa3b8c7bee6b0dc2b | ["MIT"] | null | null | null | tasks/task5/5_3.py | eweilow/si1336-simulation-and-modeling | 98a8f4e8067a82584e8d6e4aa3b8c7bee6b0dc2b | ["MIT"] | null | null | null | tasks/task5/5_3.py | eweilow/si1336-simulation-and-modeling | 98a8f4e8067a82584e8d6e4aa3b8c7bee6b0dc2b | ["MIT"] | 1 | 2020-11-05T08:07:06.000Z | 2020-11-05T08:07:06.000Z |
import matplotlib
from matplotlib import patheffects
import matplotlib.pyplot as plt
import numpy as np
import struct
maxStepSizes = []
means = []
cVs = []
ratios = []
temperatures = []
averageDistances = []
structFormat = struct.Struct("dddddd")
with open("./5_3/data.bin", "rb") as f:
count, = struct.unpack("I", f.read(struct.calcsize("I")))
for i in range(count):
maxStepSize, temperature, mean, cV, ratio, averageDistance, = structFormat.unpack_from(
f.read(structFormat.size))
maxStepSizes.append(maxStepSize)
temperatures.append(temperature)
means.append(mean)
cVs.append(cV)
ratios.append(ratio)
averageDistances.append(averageDistance)
plt.xkcd()
matplotlib.rcParams['path.effects'] = [
patheffects.withStroke(linewidth=0, foreground='w')]
plt.figure()
plt.semilogx(temperatures, averageDistances, linewidth=1)
plt.xlabel("Temperature")
plt.ylabel("Average particle distance")
plt.savefig("./plots/5_3/acceptance.png", dpi=200, bbox_inches='tight')
plt.figure()
plt.plot(temperatures, averageDistances, linewidth=1)
plt.xlim((0.2, 1.0))
plt.xlabel("Temperature")
plt.ylabel("Average particle distance")
plt.savefig("./plots/5_3/small_range.png", dpi=200, bbox_inches='tight')
| 27.106383 | 95 | 0.709576 |
7646dc35449c0e7099462a0fed2ca45427a5c577 | 940 | py | Python | xsrc/nqbp/tests/workspace1/PkgA/scripts/example_preprocessing_base.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | ["BSD-3-Clause"] | null | null | null | xsrc/nqbp/tests/workspace1/PkgA/scripts/example_preprocessing_base.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | ["BSD-3-Clause"] | null | null | null | xsrc/nqbp/tests/workspace1/PkgA/scripts/example_preprocessing_base.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/python3
#---------------------------------------------------------------------------
# This is an example of the 'single instance' NQBP pre-processing script
# where <arg7> <arg8> ... are project specific arguments passed to the
# <preprocess-script.py> script when it is
# executed.
# Usage:
# example_preprocessing_base <a1> <a2> <a3> <a4> <a5> <a6> <prjarg1>
#
# where:
# <a1>: build|clean
# <a2>: verbose|terse
# <a3>: <workspace-dir>
# <a4>: <package-dir>
# <a5>: <project-dir>
# <a6>: <current-dir>
# <prjarg1>: <compiler> // mingw|mingw_64|vc12|etc.
#---------------------------------------------------------------------------
# get definition of the Options structure
import sys
# Do stuff...
print( "--> Example Pre-Processing Script" )
if (sys.argv[2] == 'verbose'):
print( "= ECHO: " + ' '.join(sys.argv) )
| 34.814815 | 77 | 0.471277 |
d6c76e83d469c4705a5d79868b6a7b55ab6ce3dd | 4,087 | py | Python | driller/local_callback.py | xiaosatianyu/driller-y | cb83d8169de73b9ba05da416fc6076946c64e2d0 | ["BSD-2-Clause"] | null | null | null | driller/local_callback.py | xiaosatianyu/driller-y | cb83d8169de73b9ba05da416fc6076946c64e2d0 | ["BSD-2-Clause"] | null | null | null | driller/local_callback.py | xiaosatianyu/driller-y | cb83d8169de73b9ba05da416fc6076946c64e2d0 | ["BSD-2-Clause"] | null | null | null |
import os
import sys
import signal
import logging
import driller #pylint:disable=relative-import,unused-import
import subprocess
import multiprocessing
l = logging.getLogger("local_callback")
def _run_drill(drill, fuzz, _path_to_input_to_drill):
_binary_path = fuzz.binary_path
_fuzzer_out_dir = fuzz.out_dir
_bitmap_path = os.path.join(_fuzzer_out_dir, 'fuzzer-master', "fuzz_bitmap")
_timeout = drill._worker_timeout
l.warning("starting drilling of %s, %s", os.path.basename(_binary_path), os.path.basename(_path_to_input_to_drill))
args = (
"timeout", "-k", str(_timeout+10), str(_timeout),
sys.executable, os.path.abspath(__file__),
_binary_path, _fuzzer_out_dir, _bitmap_path, _path_to_input_to_drill
)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
print(p.communicate())
class LocalCallback(object):
def __init__(self, num_workers=1, worker_timeout=10*60):
self._already_drilled_inputs = set()
self._num_workers = num_workers
self._running_workers = []
self._worker_timeout = worker_timeout
@staticmethod
def _queue_files(fuzz, fuzzer='fuzzer-master'):
'''
retrieve the current queue of inputs from a fuzzer
:return: a list of strings which represent a fuzzer's queue
'''
queue_path = os.path.join(fuzz.out_dir, fuzzer, 'queue')
queue_files = filter(lambda x: x != ".state", os.listdir(queue_path))
queue_files = [os.path.join(queue_path, q) for q in queue_files]
return queue_files
def driller_callback(self, fuzz):
l.warning("Driller stuck callback triggered!")
# remove any workers that aren't running
self._running_workers = [x for x in self._running_workers if x.is_alive()]
# get the files in queue
queue = self._queue_files(fuzz)
#for i in range(1, fuzz.fuzz_id):
# fname = "fuzzer-%d" % i
# queue.extend(self.queue_files(fname))
# start drilling
not_drilled = set(queue) - self._already_drilled_inputs
if len(not_drilled) == 0:
l.warning("no inputs left to drill")
while len(self._running_workers) < self._num_workers and len(not_drilled) > 0:
to_drill_path = list(not_drilled)[0]
not_drilled.remove(to_drill_path)
self._already_drilled_inputs.add(to_drill_path)
proc = multiprocessing.Process(target=_run_drill, args=(self, fuzz, to_drill_path))
proc.start()
self._running_workers.append(proc)
__call__ = driller_callback
def kill(self):
for p in self._running_workers:
try:
p.terminate()
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
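# Hedged usage sketch (never called): driller_callback() only relies on the
# `fuzz` object exposing .binary_path, .out_dir and an AFL-style sync layout
# <out_dir>/fuzzer-master/{queue,fuzz_bitmap}.  The stand-in class below is an
# assumption used purely to illustrate the call; it is not part of driller.
def _example_usage():
    class _FakeFuzz(object):
        binary_path = "/tmp/target_binary"
        out_dir = "/tmp/afl_sync_dir"
    callback = LocalCallback(num_workers=2, worker_timeout=5 * 60)
    callback(_FakeFuzz())   # scans the queue and spawns drill workers for new inputs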
# this is for running with bash timeout
if __name__ == "__main__":
if len(sys.argv) != 5:
l.error("INTERNAL USE ONLY -- expecting 5 arguments for driller runner, got %d", len(sys.argv))
binary_path, fuzzer_out_dir, bitmap_path, path_to_input_to_drill = sys.argv[1:5]
fuzzer_bitmap = open(bitmap_path, "r").read()
# create a folder
driller_dir = os.path.join(fuzzer_out_dir, "driller")
driller_queue_dir = os.path.join(driller_dir, "queue")
try: os.mkdir(driller_dir)
except OSError: pass
try: os.mkdir(driller_queue_dir)
except OSError: pass
# get the input
input_to_drill = open(path_to_input_to_drill, "r").read()
d = driller.Driller(binary_path, input_to_drill, fuzzer_bitmap)
count = 0
for new_input in d.drill_generator():
id_num = len(os.listdir(driller_queue_dir))
fuzzer_from = path_to_input_to_drill.split("sync/")[1].split("/")[0] + path_to_input_to_drill.split("id:")[1].split(",")[0]
filepath = "id:" + ("%d" % id_num).rjust(6, "0") + ",from:" + fuzzer_from
filepath = os.path.join(driller_queue_dir, filepath)
with open(filepath, "wb") as f:
f.write(new_input[1])
count += 1
l.warning("found %d new inputs", count)
| 36.168142 | 131 | 0.656472 |
6dc31a42a84d68378d38da0963f575d3fcc858ee | 179,104 | py | Python | tensorflow/python/keras/backend.py | khanhlvg/tensorflow | a59b74ccaafae59d616ecf08204d63023ff6f49c | ["Apache-2.0"] | 2 | 2019-06-28T17:43:04.000Z | 2019-06-28T17:43:07.000Z | tensorflow/python/keras/backend.py | khanhlvg/tensorflow | a59b74ccaafae59d616ecf08204d63023ff6f49c | ["Apache-2.0"] | null | null | null | tensorflow/python/keras/backend.py | khanhlvg/tensorflow | a59b74ccaafae59d616ecf08204d63023ff6f49c | ["Apache-2.0"] | 1 | 2021-02-27T07:40:01.000Z | 2021-02-27T07:40:01.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
py_any = any
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping {graph: set_of_freezable_variables}.
# Each set tracks objects created via `freezable_variable` in the graph.
_FREEZABLE_VARS = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from tensorflow.keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_OBJECT_NAME_UIDS:
PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
global _GRAPH
global _FREEZABLE_VARS
_GRAPH = None
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
with name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
_FREEZABLE_VARS.pop(graph, None)
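# Hedged usage sketch (not part of the TF source): a common pattern is to call
# clear_session() between independently built models, e.g. in a hyperparameter
# search loop, so layer names and graph state do not accumulate across trials.
# `build_model_fn` is a placeholder for any model-building callable.
def _clear_session_example(build_model_fn, n_trials):
  for _ in range(n_trials):
    clear_session()           # drop the previous trial's graph/session state
    model = build_model_fn()  # build a fresh model in a clean graph
    del model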
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.compat.v1.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if ops.get_default_graph() is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
return symbolic_learning_phase()
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def global_learning_phase_is_set():
return _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
with name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
# In an eager context, the learning phase values applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
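# Hedged usage sketch (not part of the TF source): inside the scope,
# learning_phase() reports the forced value, and the previous value is restored
# on exit, as implemented above.
def _learning_phase_scope_example():
  with learning_phase_scope(1):
    phase = learning_phase()   # == 1 while inside the scope ("training" mode)
  return phase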
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager / tf.function only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert ops.executing_eagerly_outside_functions()
global_learning_phase_was_set = global_learning_phase_is_set()
if global_learning_phase_was_set:
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
yield
finally:
# Restore learning phase to initial value or unset.
if global_learning_phase_was_set:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
else:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
If no global Keras session exists at this point:
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
a scratch graph will either be retrieved or created:
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if os.environ.get('OMP_NUM_THREADS'):
logging.warning(
'OMP_NUM_THREADS is no longer used by the default Keras config. '
'To configure the number of threads, use tf.config.threading APIs.')
config = context.context().config
config.allow_soft_placement = True
return config
def get_default_graph_uid_map():
graph = ops.get_default_graph()
name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
return name_uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if tfdev.is_device_spec(device):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return tfdev.DeviceSpec.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [name for name in context.list_devices() if 'GPU' in name]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check if we are not
explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the
GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
# Define some computation that uses `a`.
return foo_op(..., name=scope)
```
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
"""
return ops.name_scope_v2(name)
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def unique_object_name(name,
name_uid_map=None,
avoid_names=None,
namespace='',
zero_based=False):
"""Makes a object name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
```python
_unique_layer_name('dense') # dense_1
_unique_layer_name('dense') # dense_2
```
"""
if name_uid_map is None:
name_uid_map = get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
  >>> K.is_keras_tensor(np_var)  # A numpy array is not a symbolic tensor.
  ValueError
  >>> k_var = tf.compat.v1.placeholder('float32', shape=(1,1))
  >>> K.is_keras_tensor(k_var)
  # A placeholder created directly with TensorFlow (outside of Keras) is not
  # a Keras tensor.
  False
  >>> keras_var = K.variable(np_var)
  >>> K.is_keras_tensor(keras_var)
  # A variable created with the Keras backend is not a Keras tensor.
  False
  >>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
  >>> K.is_keras_tensor(keras_placeholder)  # A placeholder is not a Keras tensor.
  False
  >>> keras_input = Input([10])
  >>> K.is_keras_tensor(keras_input)  # An Input is a Keras tensor.
  True
  >>> keras_layer_output = Dense(10)(keras_input)
  >>> K.is_keras_tensor(keras_layer_output)  # Any Keras layer output is a Keras tensor.
  True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
ragged=False):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
ragged: Boolean, whether the placeholder should have a ragged type.
In this case, values of 'None' in the 'shape' argument represent
ragged dimensions. For more information about RaggedTensors, see this
[guide](https://www.tensorflow.org/guide/ragged_tensors).
Raises:
ValueError: If called with eager execution
ValueError: If called with sparse = True and ragged = True.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True when creating a placeholder.'
)
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
elif ragged:
ragged_rank = 0
for i in range(1, len(shape)):
if shape[i] is None:
ragged_rank += 1
else:
break
value_shape = shape[(ragged_rank + 1):]
x = ragged_factory_ops.placeholder(
dtype=dtype,
ragged_rank=ragged_rank,
value_shape=value_shape,
name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
if isinstance(x, composite_tensor.CompositeTensor):
flat_components = nest.flatten(x, expand_composites=True)
return py_any(is_placeholder(c) for c in flat_components)
else:
return x.op.type == 'Placeholder'
except AttributeError:
return False
def freezable_variable(value, shape=None, name=None):
"""A tensor-like object whose value can be updated only up until execution.
After creating the freezable variable, you can update its value by calling
`var.update_value(new_value)` (similar to a regular variable).
Unlike an actual variable, the value used during execution is the current
value at the time the execution function (`backend.function()`) was created.
This is an internal API, expected to be temporary. It is used to implement a
mutable `trainable` property for `BatchNormalization` layers, with a frozen
value after model compilation.
We don't use a plain variable in this case because we need the value used
in a specific model to be frozen after `compile` has been called
(e.g. GAN use case).
Arguments:
value: The initial value for the tensor-like object.
shape: The shape for the tensor-like object (cannot be changed).
name: The name for the tensor-like object.
Returns:
A tensor-like object with a static value that can be updated via
`x.update_value(new_value)`, up until creating an execution function
(afterwards the value is fixed).
"""
graph = get_graph()
with graph.as_default():
x = array_ops.placeholder_with_default(
value, shape=shape, name=name)
x._initial_value = value
x._current_value = value
def update_value(new_value):
x._current_value = new_value
def get_value():
return x._current_value
x.update_value = update_value
x.get_value = get_value
global _FREEZABLE_VARS
if graph not in _FREEZABLE_VARS:
_FREEZABLE_VARS[graph] = weakref.WeakSet()
_FREEZABLE_VARS[graph].add(x)
return x
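# Illustrative sketch: a hypothetical, never-called helper showing the intended
# usage of `freezable_variable` as described in its docstring -- the value may
# be mutated with `update_value` until an execution function is created, after
# which the value used at run time is frozen. The name `_example_freezable_variable`
# is illustrative only and not part of the public API.
def _example_freezable_variable():  # pragma: no cover - illustration only
  frozen = freezable_variable(1.0, shape=(), name='example_freezable')
  # Before building an execution function the value can still be changed.
  frozen.update_value(0.0)
  # `get_value` returns the current (Python-side) value, here 0.0.
  return frozen.get_value()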
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32'
```
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple or list of integers, shape of returned Keras variable
dtype: data type of returned Keras variable
name: name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
from tensorflow.keras import backend as K
kvar = K.zeros((3,4))
K.eval(kvar)
# array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.],
# [ 0., 0., 0., 0.]], dtype=float32)
A = tf.constant([1,2,3])
kvar2 = K.zeros(A.shape) # [0., 0., 0.] float32 by default
kvar3 = K.zeros(A.shape,dtype=tf.int32) # [0, 0, 0] with int32 dtype
kvar4 = K.zeros([2,3]) # [[0., 0., 0.], [0., 0., 0.]]
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: dtype of returned Keras variable.
`None` uses the dtype of `x`.
name: name for the variable to create.
Returns:
A Keras variable with the shape of `x` filled with zeros.
Example:
```python
from tensorflow.keras import backend as K
kvar = K.variable(np.random.random((2,3)))
kvar_zeros = K.zeros_like(kvar)
K.eval(kvar_zeros)
# array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
  Casting a Keras variable returns a Keras tensor; the variable itself is left
  unchanged.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Examples:
Cast a float32 variable to a float64 tensor
```python
>>> import tensorflow as tf
>>> from tensorflow.keras import backend as K
>>> input = K.ones(shape=(1,3))
>>> print(input)
>>> cast_input = K.cast(input, dtype='float64')
>>> print(cast_input)
<tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
numpy=array([[1., 1., 1.]], dtype=float32)>
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x` by assignment.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
value: A tensor with the same shape as `variable`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
zero_debias = not tf2.enabled()
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=zero_debias)
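# Illustrative sketch: a hypothetical, never-called helper spelling out the
# update rule applied by `moving_average_update`. Ignoring the zero-debias
# bookkeeping, `assign_moving_average` computes
#   x <- x * momentum + value * (1 - momentum),
# so with momentum close to 1 the variable tracks a slowly moving average of
# `value`. The helper name is illustrative only.
def _example_moving_average_update():  # pragma: no cover - illustration only
  moving_mean = variable(0.0, name='example_moving_mean')
  batch_mean = constant(10.0)
  # With zero-debias disabled (i.e. when tf2 is enabled), running the returned
  # op once leaves `moving_mean` at 0.0 * 0.9 + 10.0 * 0.1 = 1.0.
  return moving_average_update(moving_mean, batch_mean, momentum=0.9)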
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply an nD tensor
  with an nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
`batch_dot` results in a tensor or variable with less dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
  `batch_dot(x, y, axes=1) = [[17], [53]]`, i.e. the main diagonal
  of `x.dot(y.T)` reshaped to `(batch_size, 1)`, although we never have to
  calculate the off-diagonal elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
"""
return array_ops.gather(reference, indices)
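# Illustrative sketch: a hypothetical, never-called helper showing that
# `gather` selects slices of `reference` along the first axis, like NumPy
# fancy indexing `reference[indices]`. The helper name is illustrative only.
def _example_gather():  # pragma: no cover - illustration only
  reference = variable([[1., 2.], [3., 4.], [5., 6.]])
  indices = constant([0, 2], dtype='int32')
  # Result corresponds to rows 0 and 2: [[1., 2.], [5., 6.]]
  return gather(reference, indices)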
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
      axis: An integer or a list of integers, the axes over which to compute
          the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether the drop or broadcast the reduction axes.
Returns:
A uint8 tensor (0s and 1s).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether the drop or broadcast the reduction axes.
Returns:
A uint8 tensor (0s and 1s).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
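# Illustrative sketch: a hypothetical, never-called helper showing why
# `logsumexp` is preferred over log(sum(exp(x))): with large inputs the naive
# form overflows to inf, while the fused reduction stays finite. The helper
# name is illustrative only.
def _example_logsumexp():  # pragma: no cover - illustration only
  x = constant([1000.0, 1000.0])
  naive = log(sum(exp(x)))   # exp(1000.) overflows in float32 -> inf
  stable = logsumexp(x)      # ~ 1000.0 + log(2.0) ~= 1000.693
  return naive, stable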
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
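# Illustrative sketch: a hypothetical, never-called helper showing that `clip`
# limits values element-wise to the interval [min_value, max_value]. The
# helper name is illustrative only.
def _example_clip():  # pragma: no cover - illustration only
  x = constant([-2.0, 0.5, 3.0])
  # -2.0 is raised to 0.0, 3.0 is lowered to 1.0 and 0.5 is left unchanged,
  # so the result is [0.0, 0.5, 1.0].
  return clip(x, 0.0, 1.0)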
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
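# Illustrative sketch: a hypothetical, never-called helper spelling out the
# formula from the docstring above on a concrete 2D input; for non-4D inputs
# the function reduces to a plain call to `nn.batch_normalization`. The helper
# name is illustrative only.
def _example_batch_normalization():  # pragma: no cover - illustration only
  x = constant([[1.0, 2.0], [3.0, 4.0]])
  mean_ = constant([2.0, 3.0])    # per-feature mean
  var_ = constant([1.0, 1.0])     # per-feature variance
  beta = constant([0.0, 0.0])     # centering offset
  gamma = constant([1.0, 1.0])    # scaling factor
  # With unit variance and the small default epsilon the result is
  # approximately (x - mean), i.e. [[-1., -1.], [1., 1.]].
  return batch_normalization(x, mean_, var_, beta, gamma, axis=-1)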
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_images_v2(
x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
elif interpolation == 'bilinear':
x = image_ops.resize_images_v2(x, new_shape,
method=image_ops.ResizeMethod.BILINEAR)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
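# Illustrative sketch: a hypothetical, never-called helper showing that
# `repeat_elements` behaves like `np.repeat` along a single axis. The helper
# name is illustrative only.
def _example_repeat_elements():  # pragma: no cover - illustration only
  x = constant([[1., 2., 3.]])   # shape (1, 3)
  # Repeating each element twice along axis 1 gives shape (1, 6):
  # [[1., 1., 2., 2., 3., 3.]], matching np.repeat([[1., 2., 3.]], 2, axis=1).
  return repeat_elements(x, 2, axis=1)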
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
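# Illustrative sketch: a hypothetical, never-called helper showing the
# Theano-style argument convention documented above: a single argument is
# interpreted as `stop`, and a single negative argument yields an empty
# tensor. The helper name is illustrative only.
def _example_arange():  # pragma: no cover - illustration only
  a = arange(3)        # [0, 1, 2], int32 by default
  b = arange(1, 7, 2)  # [1, 3, 5]
  c = arange(-3)       # start is reset to 0, so the result is empty
  return a, b, c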
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
      n: A list of integers. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
Examples:
  Flattening a 4D tensor to 2D by collapsing the trailing dimensions.
```python
>>> from tensorflow.keras import backend as K
>>> x_batch = K.ones(shape=(2, 3, 4, 5))
>>> x_batch_flatten = K.batch_flatten(x_batch)
>>> K.int_shape(x_batch_flatten)
(2, 60)
```
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
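# Illustrative sketch: a hypothetical, never-called helper showing that
# `temporal_padding` adds zeros only at the start and end of axis 1, so a
# (samples, timesteps, features) tensor grows along the timestep axis. The
# helper name is illustrative only.
def _example_temporal_padding():  # pragma: no cover - illustration only
  x = zeros((2, 3, 4))  # (samples=2, timesteps=3, features=4)
  # One step of padding on each side of axis 1 gives shape (2, 5, 4).
  return temporal_padding(x, padding=(1, 1))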
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      The (n + 1)D one-hot representation of the input, with shape
      `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
  """
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
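# Illustrative sketch: a hypothetical, never-called helper showing that
# `one_hot` expands each integer index into a vector of length `num_classes`
# along a new trailing axis. The helper name is illustrative only.
def _example_one_hot():  # pragma: no cover - illustration only
  indices = constant([0, 2, 1], dtype='int32')  # shape (3,)
  # Result has shape (3, 4):
  # [[1, 0, 0, 0],
  #  [0, 0, 1, 0],
  #  [0, 1, 0, 0]]
  return one_hot(indices, num_classes=4)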
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if not tensor_util.is_tensor(x):
return x
if context.executing_eagerly():
return x.numpy()
if not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
if ops.executing_eagerly_outside_functions():
# This method of evaluating works inside the Keras FuncGraph.
return function([], x)(x)
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
with ops.init_scope():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
with ops.init_scope():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
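# Illustrative sketch: a hypothetical, never-called helper pairing
# `batch_set_value` with `batch_get_value`: values are written to several
# variables in a single session call (or eagerly, outside functions) and read
# back as numpy arrays. The helper and variable names are illustrative only.
def _example_batch_set_and_get_value():  # pragma: no cover - illustration only
  w = variable(np.zeros((2, 2)), name='example_w')
  b = variable(np.zeros((2,)), name='example_b')
  batch_set_value([(w, np.ones((2, 2))), (b, np.full((2,), 0.5))])
  # Returns [array of ones with shape (2, 2), array([0.5, 0.5])].
  return batch_get_value([w, b])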
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
with get_graph().as_default():
op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
with ops.control_dependencies([op]):
return array_ops.identity(x)
else:
logging_ops.print_v2(message, x, output_stream=sys.stdout)
return x
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be requested via the `fetches`
  argument and additional tensor substitutions via the `feed_dict` argument.
  Note that the given substitutions are merged with the substitutions from
  `inputs`. Even though `feed_dict` is passed only once in the constructor
  (called in `model.compile()`), we can modify the values in the dictionary
  afterwards. Through this `feed_dict` we can provide additional substitutions
  besides the Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self._inputs_structure = inputs
self.inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
    # The main use case of `fetches` being passed to a model is the ability
    # to run custom updates.
    # This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
def __call__(self, inputs):
inputs = nest.flatten(inputs, expand_composites=True)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
output_structure = nest.pack_sequence_as(
self._outputs_structure,
fetched[:len(self.outputs)],
expand_composites=True)
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._inputs_structure = inputs
inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
outputs = nest.flatten(outputs, expand_composites=True)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {
i.graph
for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')
}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
if update is not None:
# `update.op` may have been None in certain cases.
updates_ops.append(update)
self._freezable_vars_to_feed = []
self._freezable_vars_values = []
freezable_vars_from_keras_graph = _FREEZABLE_VARS.get(global_graph, {})
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
init_tensors=init_tensors, graph=exec_graph, sources=inputs,
add_sources=True, handle_captures=True, base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Keep track of the value to feed to any "freezable variables"
# created in this graph.
for old_op, new_op in lifted_map.items():
if old_op in freezable_vars_from_keras_graph:
frozen_var = old_op
if frozen_var._initial_value != frozen_var._current_value:
# We only feed a frozen_variable if its value has changed;
# otherwise it can rely on the default value of the
# underlying placeholder_with_default.
self._freezable_vars_to_feed.append(new_op)
self._freezable_vars_values.append(frozen_var._current_value)
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
self._input_references = self.inputs + self._freezable_vars_to_feed
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self._input_references + list(
exec_graph.captures.values())
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self._input_references)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
input_values = nest.flatten(inputs, expand_composites=True)
if self._freezable_vars_values:
input_values = input_values + self._freezable_vars_values
converted_inputs = []
for tensor, value in zip(self._input_references, input_values):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
# EagerTensor.numpy() will often make a copy to ensure memory safety.
# However in this case `outputs` is not directly returned, so it is always
# safe to reuse the underlying buffer without checking. In such a case the
# private numpy conversion method is preferred to guarantee performance. We
# also have to call `_cpu_nograd()` since the Tensor may not be on the CPU.
# (otherwise it's just a no-op.)
return nest.pack_sequence_as(
self._outputs_structure, [x._cpu_nograd()._numpy() for x in outputs], # pylint: disable=protected-access
expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
    A callable that computes the outputs for given inputs and returns the
    output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
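# Illustrative sketch only (the helper name below is hypothetical and not part
# of the public API): build a tiny backend function from a symbolic placeholder
# and call it with a Numpy array. Constructing the ops inside the Keras graph
# mirrors how symbolic tensors are handled elsewhere in this module; exact
# behaviour may differ between graph and eager modes.
def _function_example():
  with get_graph().as_default():
    x = placeholder(shape=(None, 2))
    y = x * 2.0
  double = function([x], [y])
  # Feeding a (3, 2) array returns a list with one (3, 2) array of 2.0s.
  return double([np.ones((3, 2), dtype='float32')])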
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
    A list of gradient tensors, one for each element of `variables`.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
return map(array_ops.stop_gradient, variables)
return array_ops.stop_gradient(variables)
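# Illustrative sketch (hypothetical helper, not part of the module API): values
# still flow through `stop_gradient`, but gradients do not, so only the
# unblocked factor contributes to the derivative recorded by the tape.
def _stop_gradient_example():
  import tensorflow as tf
  v = tf.Variable([1.0, 2.0])
  with tf.GradientTape() as tape:
    y = stop_gradient(v) * v      # gradient flows only through the right-hand factor
    loss = tf.reduce_sum(y)
  return tape.gradient(loss, v)   # equals the current value of `v`, not 2 * v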
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
      (at least 3D), or nested tensors, each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
    # Process the input tensors. The input tensor needs to be split on the
    # time_step dim and reversed if go_backwards is True. In the case of nested
    # input, the input is flattened and then transformed individually.
    # The result is a tuple of lists: each item in the tuple is a list of
    # tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
    # Create the input TensorArray. If the input is a nested structure of
    # tensors, it is flattened first and one TensorArray is created per
    # flattened tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
    # Get the time(0) input and compute the output for that; the output is
    # used to determine the dtype of the output TensorArray. Don't read from
    # input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants))
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
      # The mask for the output at time T is based on the output at time T - 1.
      # For T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
          Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, ops.Tensor):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
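# Illustrative sketch (hypothetical helper): a running-sum step function driven
# through `rnn`. The shapes are arbitrary; note that the first returned state is
# also the step output, as required by the contract documented above.
def _rnn_example():
  import tensorflow as tf
  x = tf.ones((2, 3, 4))               # (samples, time, features)
  initial_states = [tf.zeros((2, 4))]
  def step(inp, states):
    summed = states[0] + inp           # accumulate the inputs over time
    return summed, [summed]
  last_output, outputs, new_states = rnn(step, x, initial_states)
  # last_output is 3.0 everywhere; outputs has shape (2, 3, 4).
  return last_output, outputs, new_states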
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
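# Illustrative sketch (hypothetical helper): a scalar boolean condition selects
# between two tensors of the same shape.
def _switch_example():
  import tensorflow as tf
  bigger = tf.constant([10.0, 20.0])
  smaller = tf.constant([1.0, 2.0])
  cond = tf.reduce_mean(bigger) > tf.reduce_mean(smaller)
  return switch(cond, bigger, smaller)   # -> [10.0, 20.0]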
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
    The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
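# Illustrative sketch (hypothetical helper): with an explicit `training` flag
# the selection is resolved immediately; without it, the global learning phase
# value or placeholder is used instead.
def _in_train_phase_example():
  import tensorflow as tf
  noisy = tf.constant([0.9, 1.1])
  clean = tf.constant([1.0, 1.0])
  return (in_train_phase(noisy, clean, training=True),    # -> noisy branch
          in_train_phase(noisy, clean, training=False))   # -> clean branch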
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
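# Illustrative sketch (hypothetical helper): combining `alpha` and `max_value`
# gives a capped leaky ReLU, matching the piecewise definition above.
def _relu_example():
  import tensorflow as tf
  x = tf.constant([-3.0, -1.0, 0.5, 4.0, 10.0])
  # -> [-0.3, -0.1, 0.5, 4.0, 6.0]: negatives scaled by 0.1, positives capped at 6.
  return relu(x, alpha=0.1, max_value=6.0)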
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
      format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
Example:
  ```python
import tensorflow as tf
from tensorflow.keras import backend as K
a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
print("a: ", a)
b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3,3])
print("b: ", b)
loss = K.categorical_crossentropy(a, b)
print('Loss: ', loss) #Loss: tf.Tensor([0.10536055 0.8046684 0.06187541], shape=(3,), dtype=float32)
loss = K.categorical_crossentropy(a, a)
print('Loss: ', loss) #Loss: tf.Tensor([1.1920929e-07 1.1920929e-07 1.1920929e-07], shape=(3,), dtype=float32)
```
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
      format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
if isinstance(output.shape, (tuple, list)):
output_rank = len(output.shape)
else:
output_rank = output.shape.ndims
if output_rank is not None:
axis %= output_rank
if axis != output_rank - 1:
permutation = list(
itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
output = array_ops.transpose(output, perm=permutation)
elif axis != -1:
raise ValueError(
'Cannot compute sparse categorical crossentropy with `axis={}` on an '
'output tensor with unknown rank'.format(axis))
target = cast(target, 'int64')
# Try to adjust the shape so that rank of labels = 1 - rank of logits.
output_shape = array_ops.shape_v2(output)
target_rank = target.shape.ndims
update_shape = (
target_rank is not None and output_rank is not None and
target_rank != output_rank - 1)
if update_shape:
target = flatten(target)
output = array_ops.reshape(output, [-1, output_shape[-1]])
if py_any([_is_symbolic_tensor(v) for v in [target, output]]):
with get_graph().as_default():
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
else:
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
if update_shape and output_rank >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, output_shape[:-1])
else:
return res
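# Illustrative sketch (hypothetical helper): integer class ids scored directly
# against raw logits, the sparse counterpart of the example shown for
# `categorical_crossentropy` above.
def _sparse_categorical_crossentropy_example():
  import tensorflow as tf
  labels = tf.constant([0, 2])                 # one class id per sample
  logits = tf.constant([[2.0, 0.5, 0.1],
                        [0.3, 0.2, 3.0]])
  return sparse_categorical_crossentropy(labels, logits, from_logits=True)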
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Sigmoid'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
else:
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
# to prevent collapsing zero when training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
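# Illustrative sketch (hypothetical helper): element-wise losses for predicted
# probabilities; the confidently correct entries give small values, the poorly
# predicted last entry gives a larger one.
def _binary_crossentropy_example():
  import tensorflow as tf
  target = tf.constant([1.0, 0.0, 1.0])
  probs = tf.constant([0.9, 0.1, 0.4])
  return binary_crossentropy(target, probs)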
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
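# Illustrative sketch (hypothetical helper) of the three linear segments.
def _hard_sigmoid_example():
  import tensorflow as tf
  return hard_sigmoid(tf.constant([-3.0, 0.0, 3.0]))   # -> [0.0, 0.5, 1.0]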
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
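# Illustrative sketch (hypothetical helper): roughly half the entries are
# zeroed and the survivors are rescaled by 1 / (1 - level), so the expected
# sum of the tensor is unchanged.
def _dropout_example():
  import tensorflow as tf
  x = tf.ones((2, 4))
  return dropout(x, level=0.5, seed=1)   # a mix of 0.0 and 2.0 entries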
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
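# Illustrative sketch (hypothetical helper): per-sample membership of the
# target class in the top-k predictions.
def _in_top_k_example():
  import tensorflow as tf
  preds = tf.constant([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
  targets = tf.constant([1, 2])
  return in_top_k(preds, targets, k=2)   # -> [True, False]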
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
    padding: string, one of 'same', 'valid'.
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
    ValueError: if invalid `padding`.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
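# Illustrative sketch (hypothetical helper): a channels_last convolution with a
# kernel laid out as (height, width, in_channels, out_channels).
def _conv2d_example():
  import tensorflow as tf
  images = tf.ones((1, 8, 8, 3))
  kernel = tf.ones((3, 3, 3, 16))
  y = conv2d(images, kernel, strides=(1, 1), padding='same',
             data_format='channels_last')
  return y.shape   # (1, 8, 8, 16): 'same' padding preserves the spatial size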
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (shape(x)[0],) + tuple(output_shape[1:])
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
      `"channels_first"`.
    ValueError: if `pool_size` is not a tuple of 2 integers.
    ValueError: if `strides` is not a tuple of 2 integers.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
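# Illustrative sketch (hypothetical helper): 2x2 max pooling with stride 2
# halves the spatial dimensions of a channels_last feature map.
def _pool2d_example():
  import tensorflow as tf
  x = tf.ones((1, 8, 8, 3))
  y = pool2d(x, pool_size=(2, 2), strides=(2, 2), padding='valid',
             data_format='channels_last', pool_mode='max')
  return y.shape   # (1, 4, 4, 3)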
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
      `"channels_first"`.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
    A 3D tensor with shape:
    (batch_size, output_length, filters)
    if data_format='channels_last'
    or 3D tensor with shape:
    (batch_size, filters, output_length)
    if data_format='channels_first'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
      inputs: 4D tensor with shape:
          (batch_size, channels_in, rows, cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, rows, cols, channels_in)
          if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
        the bias should be either a vector or
        a tensor with ndim(x) - 1 dimensions.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
        'Unexpected bias dimensions %d, expected to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
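# Illustrative usage sketch (not part of the original module): adds a per-channel
# bias to a channels_last 3D tensor; the `_example_` helper name is hypothetical.
def _example_bias_add():
  x = ones((2, 3, 4))             # (batch_size, steps, channels)
  b = constant([0., 1., 2., 3.])  # one bias value per channel
  return bias_add(x, b, data_format='channels_last')  # shape stays (2, 3, 4)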
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
  The binomial distribution with parameters `n` and `p` is the probability
  distribution of the number of successes in a sequence of `n` independent
  Bernoulli trials, each with success probability `p`. Only `n = 1` is
  supported for now.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
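# Illustrative usage sketch (not part of the original module): the four random
# helpers above share the same shape/dtype/seed conventions.
def _example_random_ops():
  a = random_normal((2, 3), mean=0.0, stddev=1.0, seed=1)
  b = random_uniform((2, 3), minval=-1.0, maxval=1.0, seed=1)
  c = random_binomial((2, 3), p=0.5, seed=1)        # tensor of zeros and ones
  d = truncated_normal((2, 3), stddev=2.0, seed=1)  # samples beyond 2*stddev are re-drawn
  return a, b, c, d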
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
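# Illustrative usage sketch (not part of the original module): the shapes expected
# by `ctc_batch_cost` for one sample with 3 labels, 8 time steps and 4 output
# classes (the last class index is the CTC blank).
def _example_ctc_batch_cost():
  y_true = constant([[1, 2, 0]], dtype='int32')  # (samples, max_string_length)
  y_pred = softmax(random_uniform((1, 8, 4)))    # (samples, time_steps, num_categories)
  input_length = constant([[8]], dtype='int32')  # (samples, 1)
  label_length = constant([[3]], dtype='int32')  # (samples, 1)
  return ctc_batch_cost(y_true, y_pred, input_length, label_length)  # (samples, 1)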
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
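# Illustrative usage sketch (not part of the original module): greedy decoding of
# a batch of softmax outputs; unused positions in the dense result are -1.
def _example_ctc_decode():
  y_pred = softmax(random_uniform((2, 10, 5)))  # (samples, time_steps, num_categories)
  seq_len = constant([10, 10], dtype='int32')   # (samples,)
  decoded, log_prob = ctc_decode(y_pred, seq_len, greedy=True)
  return decoded[0], log_prob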
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
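# Illustrative usage sketch (not part of the original module): both folds reduce a
# tensor with a binary op and differ only in the traversal direction.
def _example_folds():
  xs = constant([1., 2., 3., 4.])
  left = foldl(lambda acc, x: acc + x, xs)   # -> 10.0
  right = foldr(lambda acc, x: acc + x, xs)  # -> 10.0 (same here, addition commutes)
  return left, right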
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Except permission denied.
pass
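# For reference, a keras.json written by the block above holds the defaults;
# a sketch of the file contents (not code):
# {
#     "floatx": "float32",
#     "epsilon": 1e-07,
#     "backend": "tensorflow",
#     "image_data_format": "channels_last"
# }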
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if multi_worker_util.in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
  """Returns whether `strategy` is a TPUStrategy instance."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
def _is_symbolic_tensor(x):
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
| avg_line_length: 31.44382 | max_line_length: 117 | alphanum_fraction: 0.663949 |
hexsha: e1056c0a037cd2805456217874f7a93ac3ac489f | size: 1,877 | ext: py | lang: Python
max_stars:  src/ebay_rest/api/buy_deal/__init__.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | ["MIT"] | count: 3 | 2021-12-12T04:28:03.000Z .. 2022-03-10T03:29:18.000Z
max_issues: src/ebay_rest/api/buy_deal/__init__.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | ["MIT"] | count: 33 | 2021-06-16T20:44:36.000Z .. 2022-03-30T14:55:06.000Z
max_forks:  src/ebay_rest/api/buy_deal/__init__.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | ["MIT"] | count: 7 | 2021-06-03T09:30:23.000Z .. 2022-03-08T19:51:33.000Z
|
# coding: utf-8
# flake8: noqa
"""
Deal API
<span class=\"tablenote\"><b>Note:</b> This is a <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> API available only to select developers approved by business units.</span><br /><br />This API allows third-party developers to search for and retrieve details about eBay deals and events, as well as the items associated with those deals and events. # noqa: E501
OpenAPI spec version: v1.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from ..buy_deal.api.deal_item_api import DealItemApi
from ..buy_deal.api.event_api import EventApi
from ..buy_deal.api.event_item_api import EventItemApi
# import ApiClient
from ..buy_deal.api_client import ApiClient
from ..buy_deal.configuration import Configuration
# import models into sdk package
from ..buy_deal.models.amount import Amount
from ..buy_deal.models.coupon import Coupon
from ..buy_deal.models.deal_item import DealItem
from ..buy_deal.models.deal_item_search_response import DealItemSearchResponse
from ..buy_deal.models.error import Error
from ..buy_deal.models.error_parameter import ErrorParameter
from ..buy_deal.models.event import Event
from ..buy_deal.models.event_item import EventItem
from ..buy_deal.models.event_item_search_response import EventItemSearchResponse
from ..buy_deal.models.event_search_response import EventSearchResponse
from ..buy_deal.models.image import Image
from ..buy_deal.models.marketing_price import MarketingPrice
from ..buy_deal.models.shipping_option import ShippingOption
from ..buy_deal.models.terms import Terms
| avg_line_length: 48.128205 | max_line_length: 569 | alphanum_fraction: 0.793287 |
hexsha: bad674d791a12a7c5d94db5634342e03ce304f62 | size: 2,312 | ext: py | lang: Python
max_stars:  tests/test_matcher.py | davidz627/AmazonSyncForYNAB | 57620a4664ac7a2df686c991f1e1b55198a240fb | ["MIT"] | count: 14 | 2020-05-07T21:44:00.000Z .. 2021-07-31T19:15:10.000Z
max_issues: tests/test_matcher.py | davidz627/AmazonSyncForYNAB | 57620a4664ac7a2df686c991f1e1b55198a240fb | ["MIT"] | count: 9 | 2020-05-07T07:48:09.000Z .. 2020-10-03T19:39:17.000Z
max_forks:  tests/test_matcher.py | davidz627/AmazonSyncForYNAB | 57620a4664ac7a2df686c991f1e1b55198a240fb | ["MIT"] | count: 2 | 2020-09-14T02:19:00.000Z .. 2020-09-27T19:26:08.000Z
|
from context import matcher
from context import util
import re
# TODO: Fix up these tests.
def testmatchAmazonTransactions():
tests = {
"simple": {
"items": [("a", 200), ("b", 600), ("c", 1253), ("d", 112), ("e", 325), ("f", 823)],
"transactions": [112, 925, 2276],
"expMatch": {925: ["e", "b"], 112: ["d"], 2276: ["a", "c", "f"]}
},
"oneItemNoMatchCost": {
"items": [("a", 200)],
"transactions": [100],
"expMatch": {100: ["a"]}
},
"multipleItemsMatchSingleTransaction":{
"items": [("a", 200), ("b", 300)],
"transactions": [150],
"expMatch": {150: ["a", "b"]}
}
}
for testName, tc in tests.items():
        print(f"running test {testName}")
gotMatch = matcher.matchAmazonTransactions(tc["items"], tc["transactions"])
assert util.equalsEnough(gotMatch, tc["expMatch"])
# TODO: following is an example of a failure
#same = [("a", 5), ("b", 5)]
#cc = [5, 5]
#print(matchAmazonTransactions(same, cc))
def testMatchAmazonToYNAB():
tests = {
"works": {
"amazonT":[{503: ["blob"], 103: ["oboe"]}],
"ynabT": [{"id": 123, "memo": "foo", "amount":-503},{"id": 321, "amount":-103}],
"expPatch": [{"id": 321, "memo": "oboe"}, {"id": 123, "memo": "blob"}],
},
"works multiple transactions": {
"amazonT":[{503: ["blob"]}, {103: ["oboe"]}],
"ynabT": [{"id": 123, "memo": "foo", "amount":-503},{"id": 321, "amount":-103}],
"expPatch": [{"id": 321, "memo": "oboe"}, {"id": 123, "memo": "blob"}],
},
"notAllInYNAB":{
"amazonT":[{503: ["blob"], 103: ["oboe"]}],
"ynabT": [{"id": 321, "amount":-103}],
"expPatch": [{"id": 321, "memo": "oboe"}],
},
"notAllInAmazon":
{
"amazonT":[{503: ["blob"]}],
"ynabT": [{"id": 123, "memo": "foo", "amount":-503},{"id": 321, "amount":-103}],
"expPatch": [{"id": 123, "memo": "blob"}],
},
}
for testName, tc in tests.items():
patch = matcher.matchAmazonToYNAB(tc["amazonT"], tc["ynabT"])
        assert util.equalsEnough(patch, tc["expPatch"])
| avg_line_length: 37.290323 | max_line_length: 95 | alphanum_fraction: 0.462803 |
hexsha: cc23e9c1c3390c89e468434ce5d793364cd568ed | size: 6,235 | ext: py | lang: Python
max_stars:  real_time_face_recognition.py | hazbiy97/SAC | 4202fcf4e91264b755d46ef011b283bb2bf8e7f0 | ["MIT"] | count: null | null .. null
max_issues: real_time_face_recognition.py | hazbiy97/SAC | 4202fcf4e91264b755d46ef011b283bb2bf8e7f0 | ["MIT"] | count: null | null .. null
max_forks:  real_time_face_recognition.py | hazbiy97/SAC | 4202fcf4e91264b755d46ef011b283bb2bf8e7f0 | ["MIT"] | count: null | null .. null
|
# -----------------------------------------------------------------------------------------
# Code taken from https://github.com/davidsandberg/facenet with modifications
# -----------------------------------------------------------------------------------------
# coding=utf-8
"""Performs face detection in realtime.
Based on code from https://github.com/shanren7/real_time_face_recognition
"""
# MIT License
#
# Copyright (c) 2017 François Gervais
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import sys
import time
import mysql.connector as my
import cv2
import packages.face as face
'''
function write_db
Helper for writing to a MySQL database from Python.
version 0.0
mydb = MySQL database connection
sql = parameterized query string
multival:
 True = write multiple rows (uses executemany)
 False = write a single row (uses execute)
val = query values:
 multival = True  => val is a list of tuples
 multival = False => val is a single tuple
'''
def write_db(mydb, sql, val, multival):
mycursor = mydb.cursor()
if not multival:
mycursor.execute(sql, val)
else:
mycursor.executemany(sql, val)
mydb.commit()
'''
function read_db
Helper for reading from a MySQL database from Python.
version 0.0
mydb = MySQL database connection
sql = parameterized query string
val = query values (tuple)
'''
def read_db(mydb, sql, val):
mycursor = mydb.cursor()
mycursor.execute(sql, val)
return mycursor.fetchall()
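'''
Illustrative usage of the two helpers above (the exact queries are hypothetical):
    conn = my.connect(host="localhost", user="root", passwd="", database="sac")
    write_db(conn, "update RPL_att set Name = %s where Name = %s", ("Bob", "Alice"), False)
    rows = read_db(conn, "select Name from RPL_att where Name = %s", ("Bob",))
'''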
def add_overlays(frame, faces, frame_rate):
if faces is not None:
for face in faces:
face_bb = face.bounding_box.astype(int)
cv2.rectangle(frame,
(face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),
(0, 255, 0), 2)
if face.name is not None:
cv2.putText(frame, face.name, (face_bb[0], face_bb[3]),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
thickness=2, lineType=2)
cv2.putText(frame, str(frame_rate) + " fps", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
thickness=2, lineType=2)
def main(args):
frame_interval = 3 # Number of frames after which to run face detection
fps_display_interval = 5 # seconds
frame_rate = 0
frame_count = 0
video_capture = cv2.VideoCapture(0)
face_recognition = face.Recognition()
start_date = time.strftime("%Y-%m-%d",time.localtime())
start_time = time.time()
if args.debug:
print("Debug enabled")
face.debug = True
# connect to class database
mydb = my.connect(
host="localhost",
user="root",
passwd="",
database="sac"
)
# add column for today's class
sql = "select column_name from information_schema.columns where table_name = 'RPL_att' and column_name = %s"
val = (start_date,)
x = read_db(mydb, sql, val)
if not x:
sql = "alter table RPL_att add `" + start_date + "` time"
val = ()
write_db(mydb, sql, val, False)
    # read today's class attendance from the column added above
    sql = "select Name, `" + start_date + "` from RPL_att"
val = ()
read_att = read_db(mydb, sql, val)
class_att = {}
for i in range(len(read_att)):
class_att[read_att[i][0]]=read_att[i][1]
play = True
while play:
# Capture frame-by-frame
ret, frame = video_capture.read()
if (frame_count % frame_interval) == 0:
faces = face_recognition.identify(frame, 0.5)
# Check our current fps
end_time = time.time()
if (end_time - start_time) > fps_display_interval:
frame_rate = int(frame_count / (end_time - start_time))
start_time = time.time()
frame_count = 0
add_overlays(frame, faces, frame_rate)
frame_count += 1
cv2.imshow('Attendance', frame)
cv2.moveWindow('Attendance', 405, 180)
# Update attendance
for fc in faces:
            if fc.name != "Unknown":
class_att[fc.name] = time.strftime("%H:%M:%S",time.localtime())
'''
# realtime db update
sql = "update RPL_att set `" + start_date + "` = %s where Name = %s"
val = (time.strftime("%H:%M:%S",time.localtime()), fc.name)
write_db(mydb, sql, val)
'''
if cv2.waitKey(100) & 0xFF == ord('q'):
break
play = cv2.getWindowProperty('Attendance', 0) >= 0 # check if 'Attendance' window is closed
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
# update database
write_att = []
for key in class_att.keys():
write_att.append((class_att[key],key))
sql = "update RPL_att set `" + start_date + "` = %s where Name = %s"
write_db(mydb, sql, write_att, True)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
help='Enable some debug outputs.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| avg_line_length: 32.989418 | max_line_length: 112 | alphanum_fraction: 0.60417 |
hexsha: 2f18f2bbba51eb959a7da022afe24a3dde1d905b | size: 14,208 | ext: py | lang: Python
max_stars:  src/tests/google/appengine/ext/datastore_admin/utils.py | cooljeanius/cauliflowervest | a9bc209b610a927083bf16274d8451c6c45227bf | ["Apache-2.0"] | count: 1 | 2020-10-13T19:53:04.000Z .. 2020-10-13T19:53:04.000Z
max_issues: src/tests/google/appengine/ext/datastore_admin/utils.py | cooljeanius/cauliflowervest | a9bc209b610a927083bf16274d8451c6c45227bf | ["Apache-2.0"] | count: null | null .. null
max_forks:  src/tests/google/appengine/ext/datastore_admin/utils.py | cooljeanius/cauliflowervest | a9bc209b610a927083bf16274d8451c6c45227bf | ["Apache-2.0"] | count: null | null .. null
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities used to render templates for datastore admin."""
import base64
import datetime
import logging
import os
import random
from google.appengine.api import lib_config
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.db import metadata
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import control
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import model
from google.appengine.ext.webapp import _template
MEMCACHE_NAMESPACE = '_ah-datastore_admin'
XSRF_VALIDITY_TIME = 600
KINDS_AND_SIZES_VAR = 'kinds_and_sizes'
class ConfigDefaults(object):
"""Configurable constants.
To override datastore_admin configuration values, define values like this
in your appengine_config.py file (in the root of your app):
datastore_admin_MAPREDUCE_PATH = /_ah/mapreduce
"""
BASE_PATH = '/_ah/datastore_admin'
MAPREDUCE_PATH = '/_ah/mapreduce'
CLEANUP_MAPREDUCE_STATE = True
config = lib_config.register('datastore_admin', ConfigDefaults.__dict__)
config.BASE_PATH
def RenderToResponse(handler, template_file, template_params):
"""Render the given template_file using template_vals and write to response.
Args:
handler: the handler whose response we should render to
template_file: the file name only of the template file we are using
template_params: the parameters used to render the given template
"""
template_params = _GetDefaultParams(template_params)
rendered = _template.render(_GetTemplatePath(template_file), template_params)
handler.response.out.write(rendered)
def _GetTemplatePath(template_file):
"""Return the expected path for the template to render.
Args:
template_file: simple file name of template to render.
Returns:
path of template to render.
"""
return os.path.join(
os.path.dirname(__file__), 'templates', template_file)
def _GetDefaultParams(template_params):
"""Update template_params to always contain necessary paths and never be None.
"""
if not template_params:
template_params = {}
template_params.update({
'base_path': config.BASE_PATH,
'mapreduce_path': config.MAPREDUCE_PATH,
})
return template_params
def CreateXsrfToken(action):
"""Generate a token to be passed with a form for XSRF protection.
Args:
action: action to restrict token to
Returns:
suitably random token which is only valid for ten minutes and, if the user
is authenticated, is only valid for the user that generated it.
"""
user_str = _MakeUserStr()
token = base64.b64encode(
''.join([chr(int(random.random()*255)) for _ in range(0, 64)]))
memcache.set(token,
(user_str, action),
time=XSRF_VALIDITY_TIME,
namespace=MEMCACHE_NAMESPACE)
return token
def ValidateXsrfToken(token, action):
"""Validate a given XSRF token by retrieving it from memcache.
If the token has not been evicted from memcache (past ten minutes) and the
user strings are equal, then this is a valid token.
Args:
token: token to validate from memcache.
action: action that token should correspond to
Returns:
True if the token exists in memcache and the user strings are equal,
False otherwise.
"""
user_str = _MakeUserStr()
token_obj = memcache.get(token, namespace=MEMCACHE_NAMESPACE)
if not token_obj:
return False
token_str = token_obj[0]
token_action = token_obj[1]
if user_str != token_str or action != token_action:
return False
return True
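# Illustrative usage sketch (hypothetical handler code; needs the App Engine
# memcache service, so shown as comments only):
#   token = CreateXsrfToken('datastore_delete')      # embed as a hidden form field
#   ...later, when the form is posted back...
#   if not ValidateXsrfToken(token, 'datastore_delete'):
#     self.error(403)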
def CacheStats(formatted_results):
"""Cache last retrieved kind size values in memcache.
Args:
    formatted_results: list of dictionaries of the form returned by
      main._PresentableKindStats.
"""
kinds_and_sizes = {}
for kind_dict in formatted_results:
kinds_and_sizes[kind_dict['kind_name']] = kind_dict['total_bytes']
memcache.set(KINDS_AND_SIZES_VAR,
kinds_and_sizes,
namespace=MEMCACHE_NAMESPACE)
def RetrieveCachedStats():
"""Retrieve cached kind sizes from last datastore stats call.
Returns:
Dictionary mapping kind names to total bytes.
"""
kinds_and_sizes = memcache.get(KINDS_AND_SIZES_VAR,
namespace=MEMCACHE_NAMESPACE)
return kinds_and_sizes
def _MakeUserStr():
"""Make a user string to use to represent the user. 'noauth' by default."""
user = users.get_current_user()
if not user:
user_str = 'noauth'
else:
user_str = user.nickname()
return user_str
def GetPrettyBytes(bytes, significant_digits=0):
"""Get a pretty print view of the given number of bytes.
This will give a string like 'X MBytes'.
Args:
bytes: the original number of bytes to pretty print.
significant_digits: number of digits to display after the decimal point.
Returns:
A string that has the pretty print version of the given bytes.
"""
byte_prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E']
for i in range(0, 7):
exp = i * 10
if bytes < 2**(exp + 10):
if i == 0:
formatted_bytes = str(bytes)
else:
formatted_bytes = '%.*f' % (significant_digits, (bytes * 1.0 / 2**exp))
if formatted_bytes != '1':
plural = 's'
else:
plural = ''
return '%s %sByte%s' % (formatted_bytes, byte_prefixes[i], plural)
logging.error('Number too high to convert: %d', bytes)
return 'Alot'
def FormatThousands(value):
"""Format a numerical value, inserting commas as thousands separators.
Args:
value: An integer, float, or string representation thereof.
If the argument is a float, it is converted to a string using '%.2f'.
Returns:
A string with groups of 3 digits before the decimal point (if any)
separated by commas.
NOTE: We don't deal with whitespace, and we don't insert
commas into long strings of digits after the decimal point.
"""
if isinstance(value, float):
value = '%.2f' % value
else:
value = str(value)
if '.' in value:
head, tail = value.split('.', 1)
tail = '.' + tail
elif 'e' in value:
head, tail = value.split('e', 1)
tail = 'e' + tail
else:
head = value
tail = ''
sign = ''
if head.startswith('-'):
sign = '-'
head = head[1:]
while len(head) > 3:
tail = ',' + head[-3:] + tail
head = head[:-3]
return sign + head + tail
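# Worked examples for the two formatters above:
#   GetPrettyBytes(1536, significant_digits=1)  -> '1.5 KBytes'
#   FormatThousands(1234567.891)                -> '1,234,567.89'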
def TruncDelta(delta):
"""Strips microseconds from a timedelta."""
return datetime.timedelta(days=delta.days, seconds=delta.seconds)
def GetPrintableStrs(namespace, kinds):
"""Returns tuples describing affected kinds and namespace.
Args:
namespace: namespace being targeted.
kinds: list of kinds being targeted.
Returns:
(namespace_str, kind_str) tuple used for display to user.
"""
namespace_str = ''
if kinds:
kind_str = 'all %s entities' % ', '.join(kinds)
else:
kind_str = ''
return (namespace_str, kind_str)
def ParseKindsAndSizes(kinds):
"""Parses kind|size list and returns template parameters.
Args:
kinds: list of kinds to process.
Returns:
sizes_known: whether or not all kind objects have known sizes.
size_total: total size of objects with known sizes.
len(kinds) - 2: for template rendering of greater than 3 kinds.
"""
sizes_known = True
size_total = 0
kinds_and_sizes = RetrieveCachedStats()
if kinds_and_sizes:
for kind in kinds:
if kind in kinds_and_sizes:
size_total += kinds_and_sizes[kind]
else:
sizes_known = False
else:
sizes_known = False
if size_total:
size_total = GetPrettyBytes(size_total)
return sizes_known, size_total, len(kinds) - 2
def _CreateDatastoreConfig():
"""Create datastore config for use during datastore admin operations."""
return datastore_rpc.Configuration(force_writes=True)
class MapreduceDoneHandler(webapp.RequestHandler):
"""Handler to delete data associated with successful MapReduce jobs."""
SUFFIX = 'mapreduce_done'
def post(self):
"""Mapreduce done callback to delete job data if it was successful."""
if 'Mapreduce-Id' in self.request.headers:
mapreduce_id = self.request.headers['Mapreduce-Id']
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
keys = []
job_success = True
shard_states = model.ShardState.find_by_mapreduce_state(mapreduce_state)
for shard_state in shard_states:
keys.append(shard_state.key())
if not shard_state.result_status == 'success':
job_success = False
db_config = _CreateDatastoreConfig()
if job_success:
operation = DatastoreAdminOperation.get(
mapreduce_state.mapreduce_spec.params[
DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION])
def tx():
operation.active_jobs -= 1
operation.completed_jobs += 1
if not operation.active_jobs:
operation.status = DatastoreAdminOperation.STATUS_COMPLETED
db.delete(DatastoreAdminOperationJob.all().ancestor(operation),
config=db_config)
operation.put(config=db_config)
db.run_in_transaction(tx)
if config.CLEANUP_MAPREDUCE_STATE:
keys.append(mapreduce_state.key())
keys.append(model.MapreduceControl.get_key_by_job_id(mapreduce_id))
db.delete(keys, config=db_config)
logging.info('State for successful job %s was deleted.', mapreduce_id)
else:
logging.info('Job %s was not successful so no state was deleted.', (
mapreduce_id))
else:
logging.error('Done callback called without Mapreduce Id.')
class DatastoreAdminOperation(db.Model):
"""An entity to keep progress and status of datastore admin operation."""
STATUS_ACTIVE = "Active"
STATUS_COMPLETED = "Completed"
PARAM_DATASTORE_ADMIN_OPERATION = 'datastore_admin_operation'
description = db.TextProperty()
status = db.StringProperty()
active_jobs = db.IntegerProperty(default=0)
completed_jobs = db.IntegerProperty(default=0)
@classmethod
def kind(cls):
return "_AE_DatastoreAdmin_Operation"
class DatastoreAdminOperationJob(db.Model):
"""An entity to keep track of started jobs to ensure idempotency.
This entity can be used during spawning additional jobs. It is
always stored as a child entity of DatastoreAdminOperation.
Entity key name is job unique id.
"""
pass
def StartOperation(description):
"""Start datastore admin operation.
Args:
description: operation description to be displayed to user.
Returns:
an instance of DatastoreAdminOperation.
"""
operation = DatastoreAdminOperation(
description=description,
status=DatastoreAdminOperation.STATUS_ACTIVE,
id=db.allocate_ids(
db.Key.from_path(DatastoreAdminOperation.kind(), 1), 1)[0])
operation.put(config=_CreateDatastoreConfig())
return operation
def StartMap(operation,
job_name,
handler_spec,
reader_spec,
mapper_params,
mapreduce_params=None,
start_transaction=True):
"""Start map as part of datastore admin operation.
Will increase number of active jobs inside the operation and start new map.
Args:
operation: An instance of DatastoreAdminOperation for current operation.
job_name: Map job name.
handler_spec: Map handler specification.
reader_spec: Input reader specification.
mapper_params: Custom mapper parameters.
mapreduce_params: Custom mapreduce parameters.
start_transaction: Specify if a new transaction should be started.
Returns:
resulting map job id as string.
"""
if not mapreduce_params:
mapreduce_params = dict()
mapreduce_params[DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION] = (
str(operation.key()))
mapreduce_params['done_callback'] = '%s/%s' % (
config.BASE_PATH, MapreduceDoneHandler.SUFFIX)
mapreduce_params['force_writes'] = 'True'
def tx():
operation.active_jobs += 1
operation.put(config=_CreateDatastoreConfig())
return control.start_map(
job_name, handler_spec, reader_spec,
mapper_params,
mapreduce_parameters=mapreduce_params,
base_path=config.MAPREDUCE_PATH,
shard_count=32,
transactional=True)
if start_transaction:
return db.run_in_transaction(tx)
else:
return tx()
def RunMapForKinds(operation,
kinds,
job_name_template,
handler_spec,
reader_spec,
mapper_params):
"""Run mapper job for all entities in specified kinds.
Args:
operation: instance of DatastoreAdminOperation to record all jobs.
kinds: list of entity kinds as strings.
job_name_template: template for naming individual mapper jobs. Can
reference %(kind)s and %(namespace)s formatting variables.
handler_spec: mapper handler specification.
reader_spec: reader specification.
mapper_params: custom parameters to pass to mapper.
Returns:
Ids of all started mapper jobs as list of strings.
"""
jobs = []
for kind in kinds:
mapper_params['entity_kind'] = kind
job_name = job_name_template % {'kind': kind, 'namespace': ''}
jobs.append(StartMap(
operation, job_name, handler_spec, reader_spec, mapper_params))
return jobs
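# Illustrative usage sketch (the handler and reader dotted names below are
# hypothetical; real callers supply their own mapper specifications):
#   operation = StartOperation('Delete all Greeting entities')
#   job_ids = RunMapForKinds(
#       operation, ['Greeting'], 'Delete %(kind)s',
#       'handlers.delete.DeleteEntityHandler',
#       'google.appengine.ext.mapreduce.input_readers.DatastoreKeyInputReader',
#       {})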
| avg_line_length: 28.246521 | max_line_length: 80 | alphanum_fraction: 0.702914 |
hexsha: 9689bc6da35cffb0c0d072d6be4ba536ce3dd535 | size: 98,994 | ext: py | lang: Python
max_stars:  pyuvdata/tests/test_uvbeam.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | ["BSD-2-Clause"] | count: null | null .. null
max_issues: pyuvdata/tests/test_uvbeam.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | ["BSD-2-Clause"] | count: null | null .. null
max_forks:  pyuvdata/tests/test_uvbeam.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | ["BSD-2-Clause"] | count: null | null .. null
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for uvbeam object.
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import numpy as np
from astropy import units
from astropy.coordinates import Angle
import pytest
from pyuvdata import UVBeam
import pyuvdata.tests as uvtest
import pyuvdata.utils as uvutils
from pyuvdata.data import DATA_PATH
try:
from astropy_healpix import HEALPix
healpix_installed = True
except ImportError:
healpix_installed = False
filenames = ['HERA_NicCST_150MHz.txt', 'HERA_NicCST_123MHz.txt']
cst_folder = 'NicCSTbeams'
cst_files = [os.path.join(DATA_PATH, cst_folder, f) for f in filenames]
@pytest.fixture(scope='function')
def uvbeam_data():
"""Setup and teardown for basic parameter, property and iterator tests."""
required_parameters = ['_beam_type', '_Nfreqs', '_Naxes_vec', '_Nspws',
'_pixel_coordinate_system',
'_freq_array', '_spw_array',
'_data_normalization',
'_data_array', '_bandpass_array',
'_telescope_name', '_feed_name',
'_feed_version', '_model_name',
'_model_version', '_history',
'_antenna_type']
required_properties = ['beam_type', 'Nfreqs', 'Naxes_vec', 'Nspws',
'pixel_coordinate_system',
'freq_array', 'spw_array',
'data_normalization',
'data_array', 'bandpass_array',
'telescope_name', 'feed_name',
'feed_version', 'model_name',
'model_version', 'history',
'antenna_type']
extra_parameters = ['_Naxes1', '_Naxes2', '_Npixels', '_Nfeeds', '_Npols',
'_Ncomponents_vec',
'_axis1_array', '_axis2_array', '_nside', '_ordering',
'_pixel_array', '_feed_array', '_polarization_array',
'_basis_vector_array',
'_extra_keywords', '_Nelements',
'_element_coordinate_system',
'_element_location_array', '_delay_array',
'_x_orientation',
'_interpolation_function', '_freq_interp_kind',
'_gain_array', '_coupling_matrix',
'_reference_impedance',
'_receiver_temperature_array',
'_loss_array', '_mismatch_array',
'_s_parameters']
extra_properties = ['Naxes1', 'Naxes2', 'Npixels', 'Nfeeds', 'Npols',
'Ncomponents_vec',
'axis1_array', 'axis2_array', 'nside', 'ordering',
'pixel_array', 'feed_array', 'polarization_array',
'basis_vector_array', 'extra_keywords', 'Nelements',
'element_coordinate_system',
'element_location_array', 'delay_array',
'x_orientation',
'interpolation_function', 'freq_interp_kind',
'gain_array', 'coupling_matrix',
'reference_impedance',
'receiver_temperature_array',
'loss_array', 'mismatch_array',
's_parameters']
other_properties = ['pyuvdata_version_str']
beam_obj = UVBeam()
class DataHolder():
def __init__(self, beam_obj, required_parameters, required_properties,
extra_parameters, extra_properties, other_properties):
self.beam_obj = beam_obj
self.required_parameters = required_parameters
self.required_properties = required_properties
self.extra_parameters = extra_parameters
self.extra_properties = extra_properties
self.other_properties = other_properties
uvbeam_data = DataHolder(beam_obj, required_parameters, required_properties,
extra_parameters, extra_properties, other_properties)
# yields the data we need but will continue to the del call after tests
yield uvbeam_data
# some post-test object cleanup
del(uvbeam_data)
return
def test_parameter_iter(uvbeam_data):
"""Test expected parameters."""
all = []
for prop in uvbeam_data.beam_obj:
all.append(prop)
for a in uvbeam_data.required_parameters + uvbeam_data.extra_parameters:
assert a in all, 'expected attribute ' + a + ' not returned in object iterator'
def test_required_parameter_iter(uvbeam_data):
"""Test expected required parameters."""
required = []
for prop in uvbeam_data.beam_obj.required():
required.append(prop)
for a in uvbeam_data.required_parameters:
assert a in required, 'expected attribute ' + a + ' not returned in required iterator'
def test_extra_parameter_iter(uvbeam_data):
"""Test expected optional parameters."""
extra = []
for prop in uvbeam_data.beam_obj.extra():
extra.append(prop)
for a in uvbeam_data.extra_parameters:
assert a in extra, 'expected attribute ' + a + ' not returned in extra iterator'
def test_unexpected_parameters(uvbeam_data):
"""Test for extra parameters."""
expected_parameters = uvbeam_data.required_parameters + uvbeam_data.extra_parameters
attributes = [i for i in uvbeam_data.beam_obj.__dict__.keys() if i[0] == '_']
for a in attributes:
assert a in expected_parameters, 'unexpected parameter ' + a + ' found in UVBeam'
def test_unexpected_attributes(uvbeam_data):
"""Test for extra attributes."""
expected_attributes = uvbeam_data.required_properties + \
uvbeam_data.extra_properties + uvbeam_data.other_properties
attributes = [i for i in uvbeam_data.beam_obj.__dict__.keys() if i[0] != '_']
for a in attributes:
assert a in expected_attributes, 'unexpected attribute ' + a + ' found in UVBeam'
def test_properties(uvbeam_data):
"""Test that properties can be get and set properly."""
prop_dict = dict(list(zip(uvbeam_data.required_properties + uvbeam_data.extra_properties,
uvbeam_data.required_parameters + uvbeam_data.extra_parameters)))
for k, v in prop_dict.items():
rand_num = np.random.rand()
setattr(uvbeam_data.beam_obj, k, rand_num)
this_param = getattr(uvbeam_data.beam_obj, v)
try:
assert rand_num == this_param.value
except AssertionError:
print('setting {prop_name} to a random number failed'.format(prop_name=k))
raise
def test_errors():
beam_obj = UVBeam()
pytest.raises(ValueError, beam_obj._convert_to_filetype, 'foo')
def test_peak_normalize():
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
orig_bandpass_array = copy.deepcopy(efield_beam.bandpass_array)
maxima = np.zeros(efield_beam.Nfreqs)
for freq_i in range(efield_beam.Nfreqs):
maxima[freq_i] = np.amax(abs(efield_beam.data_array[:, :, :, freq_i]))
efield_beam.peak_normalize()
assert np.amax(abs(efield_beam.data_array)) == 1
assert np.sum(abs(efield_beam.bandpass_array - orig_bandpass_array * maxima)) == 0
assert efield_beam.data_normalization == 'peak'
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
orig_bandpass_array = copy.deepcopy(power_beam.bandpass_array)
maxima = np.zeros(efield_beam.Nfreqs)
for freq_i in range(efield_beam.Nfreqs):
maxima[freq_i] = np.amax(power_beam.data_array[:, :, :, freq_i])
power_beam.peak_normalize()
assert np.amax(abs(power_beam.data_array)) == 1
assert np.sum(abs(power_beam.bandpass_array - orig_bandpass_array * maxima)) == 0
assert power_beam.data_normalization == 'peak'
power_beam.data_normalization = 'solid_angle'
pytest.raises(NotImplementedError, power_beam.peak_normalize)
def test_stokes_matrix():
beam = UVBeam()
pytest.raises(ValueError, beam._stokes_matrix, -2)
pytest.raises(ValueError, beam._stokes_matrix, 5)
@uvtest.skipIf_no_healpix
def test_efield_to_pstokes():
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
pstokes_beam = copy.deepcopy(efield_beam)
pstokes_beam.interpolation_function = 'az_za_simple'
pstokes_beam.to_healpix()
pstokes_beam.efield_to_pstokes()
pstokes_beam_2 = copy.deepcopy(efield_beam)
pstokes_beam_2.interpolation_function = 'az_za_simple'
pstokes_beam_2.to_healpix()
# convert to pstokes after interpolating
beam_return = pstokes_beam_2.efield_to_pstokes(inplace=False)
pstokes_beam = copy.deepcopy(efield_beam)
# interpolate after converting to pstokes
pstokes_beam.interpolation_function = 'az_za_simple'
pstokes_beam.efield_to_pstokes()
pstokes_beam.to_healpix()
pstokes_beam.peak_normalize()
beam_return.peak_normalize()
# NOTE: So far, the following doesn't hold unless the beams are peak_normalized again.
# This seems to be the fault of interpolation
assert np.allclose(pstokes_beam.data_array, beam_return.data_array, atol=1e-2)
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
pytest.raises(ValueError, power_beam.efield_to_pstokes)
def test_efield_to_power():
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
new_power_beam = efield_beam.efield_to_power(calc_cross_pols=False, inplace=False)
# The values in the beam file only have 4 sig figs, so they don't match precisely
diff = np.abs(new_power_beam.data_array - power_beam.data_array)
assert np.max(diff) < 2
reldiff = diff / power_beam.data_array
assert np.max(reldiff) < 0.002
# set data_array tolerances higher to test the rest of the object
# tols are (relative, absolute)
tols = [0.002, 0]
power_beam._data_array.tols = tols
# modify the history to match
power_beam.history += ' Converted from efield to power using pyuvdata.'
assert power_beam == new_power_beam
# test with non-orthogonal basis vectors
# first construct a beam with non-orthogonal basis vectors
new_basis_vecs = np.zeros_like(efield_beam.basis_vector_array)
new_basis_vecs[0, 0, :, :] = np.sqrt(0.5)
new_basis_vecs[0, 1, :, :] = np.sqrt(0.5)
new_basis_vecs[1, :, :, :] = efield_beam.basis_vector_array[1, :, :, :]
new_data = np.zeros_like(efield_beam.data_array)
new_data[0, :, :, :, :, :] = np.sqrt(2) * efield_beam.data_array[0, :, :, :, :, :]
new_data[1, :, :, :, :, :] = (efield_beam.data_array[1, :, :, :, :, :]
- efield_beam.data_array[0, :, :, :, :, :])
efield_beam2 = copy.deepcopy(efield_beam)
efield_beam2.basis_vector_array = new_basis_vecs
efield_beam2.data_array = new_data
efield_beam2.check()
# now convert to power. Should get the same result
new_power_beam2 = copy.deepcopy(efield_beam2)
new_power_beam2.efield_to_power(calc_cross_pols=False)
assert new_power_beam == new_power_beam2
if healpix_installed:
# check that this raises an error if trying to convert to HEALPix:
efield_beam2.interpolation_function = 'az_za_simple'
pytest.raises(NotImplementedError, efield_beam2.to_healpix,
inplace=False)
# now try a different rotation to non-orthogonal basis vectors
new_basis_vecs = np.zeros_like(efield_beam.basis_vector_array)
new_basis_vecs[0, :, :, :] = efield_beam.basis_vector_array[0, :, :, :]
new_basis_vecs[1, 0, :, :] = np.sqrt(0.5)
new_basis_vecs[1, 1, :, :] = np.sqrt(0.5)
new_data = np.zeros_like(efield_beam.data_array)
new_data[0, :, :, :, :, :] = (efield_beam.data_array[0, :, :, :, :, :]
- efield_beam.data_array[1, :, :, :, :, :])
new_data[1, :, :, :, :, :] = np.sqrt(2) * efield_beam.data_array[1, :, :, :, :, :]
efield_beam2 = copy.deepcopy(efield_beam)
efield_beam2.basis_vector_array = new_basis_vecs
efield_beam2.data_array = new_data
efield_beam2.check()
# now convert to power. Should get the same result
new_power_beam2 = copy.deepcopy(efield_beam2)
new_power_beam2.efield_to_power(calc_cross_pols=False)
assert new_power_beam == new_power_beam2
# now construct a beam with orthogonal but rotated basis vectors
new_basis_vecs = np.zeros_like(efield_beam.basis_vector_array)
new_basis_vecs[0, 0, :, :] = np.sqrt(0.5)
new_basis_vecs[0, 1, :, :] = np.sqrt(0.5)
new_basis_vecs[1, 0, :, :] = -1 * np.sqrt(0.5)
new_basis_vecs[1, 1, :, :] = np.sqrt(0.5)
new_data = np.zeros_like(efield_beam.data_array)
new_data[0, :, :, :, :, :] = np.sqrt(0.5) * (efield_beam.data_array[0, :, :, :, :, :]
+ efield_beam.data_array[1, :, :, :, :, :])
new_data[1, :, :, :, :, :] = np.sqrt(0.5) * (-1 * efield_beam.data_array[0, :, :, :, :, :]
+ efield_beam.data_array[1, :, :, :, :, :])
efield_beam2 = copy.deepcopy(efield_beam)
efield_beam2.basis_vector_array = new_basis_vecs
efield_beam2.data_array = new_data
efield_beam2.check()
# now convert to power. Should get the same result
new_power_beam2 = copy.deepcopy(efield_beam2)
new_power_beam2.efield_to_power(calc_cross_pols=False)
assert new_power_beam == new_power_beam2
# test calculating cross pols
new_power_beam = efield_beam.efield_to_power(calc_cross_pols=True, inplace=False)
assert np.all(np.abs(new_power_beam.data_array[:, :, 0, :, :,
np.where(new_power_beam.axis1_array == 0)[0]])
> np.abs(new_power_beam.data_array[:, :, 2, :, :,
np.where(new_power_beam.axis1_array == 0)[0]]))
assert np.all(np.abs(new_power_beam.data_array[:, :, 0, :, :,
np.where(new_power_beam.axis1_array == np.pi / 2.)[0]])
> np.abs(new_power_beam.data_array[:, :, 2, :, :,
np.where(new_power_beam.axis1_array == np.pi / 2.)[0]]))
# test writing out & reading back in power files (with cross pols which are complex)
write_file = os.path.join(DATA_PATH, 'test/outtest_beam.fits')
new_power_beam.write_beamfits(write_file, clobber=True)
new_power_beam2 = UVBeam()
new_power_beam2.read_beamfits(write_file)
assert new_power_beam == new_power_beam2
# test keeping basis vectors
new_power_beam = efield_beam.efield_to_power(calc_cross_pols=False,
keep_basis_vector=True,
inplace=False)
assert np.allclose(new_power_beam.data_array, np.abs(efield_beam.data_array)**2)
# test raises error if beam is already a power beam
pytest.raises(ValueError, power_beam.efield_to_power)
# test raises error if input efield beam has Naxes_vec=3
efield_beam.Naxes_vec = 3
pytest.raises(ValueError, efield_beam.efield_to_power)
def test_freq_interpolation():
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
power_beam.interpolation_function = 'az_za_simple'
# test frequency interpolation returns data arrays for small and large tolerances
freq_orig_vals = np.array([123e6, 150e6])
interp_data, interp_basis_vector, interp_bandpass = \
power_beam.interp(freq_array=freq_orig_vals, freq_interp_tol=0.0,
return_bandpass=True)
assert isinstance(interp_data, np.ndarray)
assert isinstance(interp_bandpass, np.ndarray)
np.testing.assert_array_almost_equal(power_beam.bandpass_array, interp_bandpass)
np.testing.assert_array_almost_equal(power_beam.data_array, interp_data)
assert interp_basis_vector is None
interp_data, interp_basis_vector, interp_bandpass = \
power_beam.interp(freq_array=freq_orig_vals, freq_interp_tol=1.0,
return_bandpass=True)
assert isinstance(interp_data, np.ndarray)
assert isinstance(interp_bandpass, np.ndarray)
np.testing.assert_array_almost_equal(power_beam.bandpass_array, interp_bandpass)
np.testing.assert_array_almost_equal(power_beam.data_array, interp_data)
assert interp_basis_vector is None
# test frequency interpolation returns new UVBeam for small and large tolerances
power_beam.saved_interp_functions = {}
new_beam_obj = power_beam.interp(freq_array=freq_orig_vals, freq_interp_tol=0.0,
new_object=True)
assert isinstance(new_beam_obj, UVBeam)
np.testing.assert_array_almost_equal(new_beam_obj.freq_array[0], freq_orig_vals)
assert new_beam_obj.freq_interp_kind == 'linear'
# test that saved functions are erased in new obj
assert not hasattr(new_beam_obj, 'saved_interp_functions')
assert power_beam.history != new_beam_obj.history
new_beam_obj.history = power_beam.history
assert power_beam == new_beam_obj
new_beam_obj = power_beam.interp(freq_array=freq_orig_vals, freq_interp_tol=1.0,
new_object=True)
assert isinstance(new_beam_obj, UVBeam)
np.testing.assert_array_almost_equal(new_beam_obj.freq_array[0], freq_orig_vals)
# assert interp kind is 'nearest' when within tol
assert new_beam_obj.freq_interp_kind == 'nearest'
new_beam_obj.freq_interp_kind = 'linear'
assert power_beam.history != new_beam_obj.history
new_beam_obj.history = power_beam.history
assert power_beam == new_beam_obj
# test frequency interpolation returns valid new UVBeam for different
# number of freqs from input
power_beam.saved_interp_functions = {}
new_beam_obj = power_beam.interp(freq_array=np.linspace(123e6, 150e6, num=5),
freq_interp_tol=0.0, new_object=True)
assert isinstance(new_beam_obj, UVBeam)
np.testing.assert_array_almost_equal(new_beam_obj.freq_array[0],
np.linspace(123e6, 150e6, num=5))
assert new_beam_obj.freq_interp_kind == 'linear'
# test that saved functions are erased in new obj
assert not hasattr(new_beam_obj, 'saved_interp_functions')
assert power_beam.history != new_beam_obj.history
new_beam_obj.history = power_beam.history
# down select to orig freqs and test equality
new_beam_obj.select(frequencies=freq_orig_vals)
assert power_beam.history != new_beam_obj.history
new_beam_obj.history = power_beam.history
assert power_beam == new_beam_obj
# Using only one freq chan should trigger a ValueError if interp_bool is True,
# unless the original frequency channel is requested, in which case interp_bool
# is False. Therefore, to test that the interp_bool=False path returns an array
# slice as desired, check that no ValueError is raised in this case.
# Other ways of testing this (e.g. interp_data_array.flags['OWNDATA']) do not work.
_pb = power_beam.select(frequencies=power_beam.freq_array[0, :1], inplace=False)
try:
interp_data, interp_basis_vector = _pb.interp(freq_array=_pb.freq_array[0])
except ValueError:
raise AssertionError("UVBeam.interp didn't return an array slice as expected")
# test errors if one frequency
power_beam_singlef = power_beam.select(freq_chans=[0], inplace=False)
pytest.raises(ValueError, power_beam_singlef.interp, freq_array=np.array([150e6]))
# assert freq_interp_kind ValueError
power_beam.interpolation_function = 'az_za_simple'
power_beam.freq_interp_kind = None
pytest.raises(ValueError, power_beam.interp, az_array=power_beam.axis1_array,
za_array=power_beam.axis2_array,
freq_array=freq_orig_vals, polarizations=['xx'])


def test_freq_interp_real_and_complex():
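"""Test that frequency interpolation of real and complex data gives the same result."""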
# test interpolation of real and complex data are the same
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
power_beam.interpolation_function = 'az_za_simple'
# make a new object with more frequencies
freqs = np.linspace(123e6, 150e6, 4)
power_beam.freq_interp_kind = 'linear'
pbeam = power_beam.interp(freq_array=freqs, new_object=True)
# modulate the data
pbeam.data_array[:, :, :, 1] *= 2
pbeam.data_array[:, :, :, 2] *= 0.5
# interpolate cubic on real data
freqs = np.linspace(123e6, 150e6, 10)
pbeam.freq_interp_kind = 'cubic'
pb_int = pbeam.interp(freq_array=freqs)[0]
# interpolate cubic on complex data and compare to ensure they are the same
pbeam.data_array = pbeam.data_array.astype(np.complex128)
pb_int2 = pbeam.interp(freq_array=freqs)[0]
assert np.all(np.isclose(np.abs(pb_int - pb_int2), 0))


def test_power_spatial_interpolation():
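"""Test az/za spatial interpolation of a power beam, including new-object returns, spline reuse and error cases."""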
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x', 'y'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# check that interpolating to existing points gives the same answer
za_orig_vals, az_orig_vals = np.meshgrid(power_beam.axis2_array,
power_beam.axis1_array)
az_orig_vals = az_orig_vals.ravel(order='C')
za_orig_vals = za_orig_vals.ravel(order='C')
freq_orig_vals = np.array([123e6, 150e6])
# test error if no interpolation function is set
pytest.raises(ValueError, power_beam.interp, az_array=az_orig_vals,
za_array=za_orig_vals, freq_array=freq_orig_vals)
power_beam.interpolation_function = 'az_za_simple'
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals)
data_array_compare = power_beam.data_array
interp_data_array = interp_data_array.reshape(data_array_compare.shape, order='F')
assert np.allclose(data_array_compare, interp_data_array)
# test that new object from interpolation is identical
new_power_beam = power_beam.interp(az_array=power_beam.axis1_array,
za_array=power_beam.axis2_array,
az_za_grid=True,
freq_array=freq_orig_vals,
new_object=True)
assert new_power_beam.freq_interp_kind == 'nearest'
assert new_power_beam.history == (power_beam.history + ' Interpolated in '
'frequency and to a new azimuth/zenith '
'angle grid using pyuvdata with '
'interpolation_function = az_za_simple '
'and freq_interp_kind = nearest.')
# make histories & freq_interp_kind equal
new_power_beam.history = power_beam.history
new_power_beam.freq_interp_kind = 'linear'
assert new_power_beam == power_beam
# test that interp to every other point returns an object that matches a select
axis1_inds = np.arange(0, power_beam.Naxes1, 2)
axis2_inds = np.arange(0, power_beam.Naxes2, 2)
select_beam = power_beam.select(axis1_inds=axis1_inds, axis2_inds=axis2_inds,
inplace=False)
interp_beam = power_beam.interp(az_array=power_beam.axis1_array[axis1_inds],
za_array=power_beam.axis2_array[axis2_inds],
az_za_grid=True, new_object=True)
assert select_beam.history != interp_beam.history
interp_beam.history = select_beam.history
assert select_beam == interp_beam
# test error if new_object set without az_za_grid
with pytest.raises(ValueError) as cm:
power_beam.interp(az_array=az_orig_vals, za_array=za_orig_vals,
freq_array=freq_orig_vals, new_object=True)
assert str(cm.value).startswith('A new object can only be returned')
# test only a single polarization
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals,
polarizations=['xx'])
data_array_compare = power_beam.data_array[:, :, :1]
interp_data_array = interp_data_array.reshape(data_array_compare.shape, order='F')
assert np.allclose(data_array_compare, interp_data_array)
# test no errors using different points
az_interp_vals = np.array(np.arange(0, 2 * np.pi, np.pi / 9.0).tolist()
+ np.arange(0, 2 * np.pi, np.pi / 9.0).tolist())
za_interp_vals = np.array((np.zeros((18)) + np.pi / 4).tolist()
+ (np.zeros((18)) + np.pi / 12).tolist())
freq_interp_vals = np.arange(125e6, 145e6, 5e6)
# Test requesting separate polarizations on different calls while reusing splines.
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_interp_vals[:2],
za_array=za_interp_vals[:2],
freq_array=freq_interp_vals,
polarizations=['xx'], reuse_spline=True)
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_interp_vals[:2],
za_array=za_interp_vals[:2],
freq_array=freq_interp_vals,
polarizations=['yy'], reuse_spline=True)
# test reusing the spline fit.
orig_data_array, interp_basis_vector = power_beam.interp(az_array=az_interp_vals,
za_array=za_interp_vals,
freq_array=freq_interp_vals, reuse_spline=True)
reused_data_array, interp_basis_vector = power_beam.interp(az_array=az_interp_vals,
za_array=za_interp_vals,
freq_array=freq_interp_vals, reuse_spline=True)
assert np.all(reused_data_array == orig_data_array)
del power_beam.saved_interp_functions
# test errors if frequency interp values outside range
pytest.raises(ValueError, power_beam.interp, az_array=az_interp_vals,
za_array=za_interp_vals, freq_array=np.array([100]))
# test errors if positions outside range
power_beam.select(axis2_inds=np.where(power_beam.axis2_array <= np.pi / 2.)[0])
pytest.raises(ValueError, power_beam.interp, az_array=az_interp_vals,
za_array=za_interp_vals + np.pi / 2)
# test that frequency-only interpolation raises no errors
interp_data_array, interp_basis_vector = power_beam.interp(freq_array=freq_interp_vals)
# assert polarization value error
pytest.raises(ValueError, power_beam.interp, az_array=az_interp_vals,
za_array=za_interp_vals,
polarizations=['pI'])


def test_efield_spatial_interpolation():
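"""Test az/za spatial interpolation of an E-field beam, including basis vectors and spline reuse."""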
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
za_orig_vals, az_orig_vals = np.meshgrid(efield_beam.axis2_array,
efield_beam.axis1_array)
az_orig_vals = az_orig_vals.ravel(order='C')
za_orig_vals = za_orig_vals.ravel(order='C')
freq_orig_vals = np.array([123e6, 150e6])
efield_beam.interpolation_function = 'az_za_simple'
interp_data_array, interp_basis_vector = efield_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals)
interp_data_array = interp_data_array.reshape(efield_beam.data_array.shape, order='F')
interp_basis_vector = interp_basis_vector.reshape(efield_beam.basis_vector_array.shape, order='F')
assert np.allclose(efield_beam.data_array, interp_data_array)
assert np.allclose(efield_beam.basis_vector_array, interp_basis_vector)
# test that new object from interpolation is identical
new_efield_beam = efield_beam.interp(az_array=efield_beam.axis1_array,
za_array=efield_beam.axis2_array,
az_za_grid=True,
freq_array=freq_orig_vals,
new_object=True)
assert new_efield_beam.freq_interp_kind == 'nearest'
assert new_efield_beam.history == (efield_beam.history + ' Interpolated in '
'frequency and to a new azimuth/zenith '
'angle grid using pyuvdata with '
'interpolation_function = az_za_simple '
'and freq_interp_kind = nearest.')
# make histories & freq_interp_kind equal
new_efield_beam.history = efield_beam.history
new_efield_beam.freq_interp_kind = 'linear'
assert new_efield_beam == efield_beam
# test that interp to every other point returns an object that matches a select
axis1_inds = np.arange(0, efield_beam.Naxes1, 2)
axis2_inds = np.arange(0, efield_beam.Naxes2, 2)
select_beam = efield_beam.select(axis1_inds=axis1_inds, axis2_inds=axis2_inds,
inplace=False)
interp_beam = efield_beam.interp(az_array=efield_beam.axis1_array[axis1_inds],
za_array=efield_beam.axis2_array[axis2_inds],
az_za_grid=True, new_object=True)
assert select_beam.history != interp_beam.history
interp_beam.history = select_beam.history
assert select_beam == interp_beam
# test no errors using different points
az_interp_vals = np.array(np.arange(0, 2 * np.pi, np.pi / 9.0).tolist()
+ np.arange(0, 2 * np.pi, np.pi / 9.0).tolist())
za_interp_vals = np.array((np.zeros((18)) + np.pi / 4).tolist()
+ (np.zeros((18)) + np.pi / 12).tolist())
freq_interp_vals = np.arange(125e6, 145e6, 10e6)
interp_data_array, interp_basis_vector = efield_beam.interp(az_array=az_interp_vals,
za_array=za_interp_vals,
freq_array=freq_interp_vals)
# test reusing the spline fit
orig_data_array, interp_basis_vector = efield_beam.interp(az_array=az_interp_vals,
za_array=za_interp_vals,
freq_array=freq_interp_vals, reuse_spline=True)
reused_data_array, interp_basis_vector = efield_beam.interp(az_array=az_interp_vals,
za_array=za_interp_vals,
freq_array=freq_interp_vals, reuse_spline=True)
assert np.all(reused_data_array == orig_data_array)
select_data_array_orig, interp_basis_vector = efield_beam.interp(az_array=az_interp_vals[0:1],
za_array=za_interp_vals[0:1],
freq_array=np.array([127e6]))
select_data_array_reused, interp_basis_vector = efield_beam.interp(az_array=az_interp_vals[0:1],
za_array=za_interp_vals[0:1],
freq_array=np.array([127e6]),
reuse_spline=True)
assert np.allclose(select_data_array_orig, select_data_array_reused)
del efield_beam.saved_interp_functions


def test_interp_longitude_branch_cut():
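"""Test that interpolation handles azimuth values across the 0/360 degree branch cut for power and E-field beams."""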
beam = UVBeam()
beam.read_cst_beam(cst_files, beam_type='power', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
beam.interpolation_function = 'az_za_simple'
interp_data_array, interp_basis_vector = beam.interp(
az_array=np.deg2rad(np.repeat(np.array([[-1], [359], [0], [360]]), 181, axis=1).flatten()),
za_array=np.repeat(beam.axis2_array[np.newaxis, :], 4, axis=0).flatten())
interp_data_array = interp_data_array.reshape(beam.Naxes_vec, beam.Nspws,
beam.Npols, beam.Nfreqs,
4, beam.Naxes2)
assert np.allclose(interp_data_array[:, :, :, :, 0, :],
                   interp_data_array[:, :, :, :, 1, :],
                   rtol=beam._data_array.tols[0],
                   atol=beam._data_array.tols[1])
assert np.allclose(interp_data_array[:, :, :, :, 2, :],
                   interp_data_array[:, :, :, :, 3, :],
                   rtol=beam._data_array.tols[0],
                   atol=beam._data_array.tols[1])
# repeat with efield
beam = UVBeam()
beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
beam.interpolation_function = 'az_za_simple'
interp_data_array, interp_basis_vector = beam.interp(
az_array=np.deg2rad(np.repeat(np.array([[-1], [359], [0], [360]]), 181, axis=1).flatten()),
za_array=np.repeat(beam.axis2_array[np.newaxis, :], 4, axis=0).flatten())
interp_data_array = interp_data_array.reshape(beam.Naxes_vec, beam.Nspws,
beam.Nfeeds, beam.Nfreqs,
4, beam.Naxes2)
assert np.allclose(interp_data_array[:, :, :, :, 0, :],
                   interp_data_array[:, :, :, :, 1, :],
                   rtol=beam._data_array.tols[0],
                   atol=beam._data_array.tols[1])
assert np.allclose(interp_data_array[:, :, :, :, 2, :],
                   interp_data_array[:, :, :, :, 3, :],
                   rtol=beam._data_array.tols[0],
                   atol=beam._data_array.tols[1])


@uvtest.skipIf_no_healpix
def test_healpix_interpolation():
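"""Test interpolation to and from HEALPix grids for E-field and power beams, including error cases."""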
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files, beam_type='efield', frequency=[150e6, 123e6],
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
efield_beam.interpolation_function = 'az_za_simple'
orig_efield_beam = copy.deepcopy(efield_beam)
# test calling interp with healpix parameters directly gives same result
min_res = np.min(np.array([np.diff(efield_beam.axis1_array)[0],
np.diff(efield_beam.axis2_array)[0]]))
nside_min_res = np.sqrt(3 / np.pi) * np.radians(60.) / min_res
nside = int(2**np.ceil(np.log2(nside_min_res)))
new_efield_beam = efield_beam.interp(healpix_nside=nside,
new_object=True)
efield_beam2 = efield_beam.to_healpix(inplace=False)
efield_beam.to_healpix(nside=nside)
assert efield_beam2 == efield_beam
assert new_efield_beam == efield_beam
# check that interpolating to existing points gives the same answer
efield_beam.interpolation_function = 'healpix_simple'
hp_obj = HEALPix(nside=efield_beam.nside)
hpx_lon, hpx_lat = hp_obj.healpix_to_lonlat(np.arange(hp_obj.npix))
za_orig_vals = (Angle(np.pi / 2, units.radian) - hpx_lat).radian
az_orig_vals = hpx_lon.radian
az_orig_vals = az_orig_vals.ravel(order='C')
za_orig_vals = za_orig_vals.ravel(order='C')
freq_orig_vals = np.array([123e6, 150e6])
interp_data_array, interp_basis_vector = efield_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals)
data_array_compare = efield_beam.data_array
interp_data_array = interp_data_array.reshape(data_array_compare.shape, order='F')
assert np.allclose(data_array_compare, interp_data_array)
# test calling interp with healpix parameters directly gives same result
new_efield_beam = efield_beam.interp(healpix_nside=efield_beam.nside,
freq_array=freq_orig_vals,
new_object=True)
assert new_efield_beam.freq_interp_kind == 'nearest'
assert new_efield_beam.history == (efield_beam.history + ' Interpolated in '
'frequency and to a new healpix grid '
'using pyuvdata with '
'interpolation_function = healpix_simple '
'and freq_interp_kind = nearest.')
# make histories & freq_interp_kind equal
new_efield_beam.history = efield_beam.history
new_efield_beam.freq_interp_kind = 'linear'
assert new_efield_beam == efield_beam
del new_efield_beam
# test that interp to every other point returns an object that matches a select
pixel_inds = np.arange(0, efield_beam.Npixels, 2)
select_beam = efield_beam.select(pixels=pixel_inds, inplace=False)
interp_beam = efield_beam.interp(healpix_inds=efield_beam.pixel_array[pixel_inds],
healpix_nside=efield_beam.nside, new_object=True)
assert select_beam.history != interp_beam.history
interp_beam.history = select_beam.history
assert select_beam == interp_beam
# test interp from healpix to regular az/za grid
new_reg_beam = efield_beam.interp(az_array=orig_efield_beam.axis1_array,
za_array=orig_efield_beam.axis2_array,
az_za_grid=True, new_object=True)
# this diff is pretty large. 2 rounds of interpolation is not a good thing.
# but we can check that the rest of the object makes sense
diff = new_reg_beam.data_array - orig_efield_beam.data_array
diff_ratio = diff / orig_efield_beam.data_array
assert np.all(np.abs(diff_ratio) < 3)
# set data_array tolerances higher to test the rest of the object
# tols are (relative, absolute)
tols = [3, 0]
new_reg_beam._data_array.tols = tols
assert new_reg_beam.history != orig_efield_beam.history
new_reg_beam.history = orig_efield_beam.history
new_reg_beam.interpolation_function = 'az_za_simple'
assert new_reg_beam == orig_efield_beam
# test errors with specifying healpix_inds without healpix_nside
hp_obj = HEALPix(nside=efield_beam.nside)
with pytest.raises(ValueError) as cm:
efield_beam.interp(healpix_inds=np.arange(hp_obj.npix),
freq_array=freq_orig_vals)
assert str(cm.value).startswith('healpix_nside must be set if healpix_inds is set')
# test error setting both healpix_nside and az_array
with pytest.raises(ValueError) as cm:
efield_beam.interp(healpix_nside=efield_beam.nside, az_array=az_orig_vals,
za_array=za_orig_vals, freq_array=freq_orig_vals)
assert str(cm.value).startswith('healpix_nside and healpix_inds can not be')
# basis_vector exception
efield_beam.basis_vector_array[0, 1, :] = 10.0
pytest.raises(NotImplementedError, efield_beam.interp, az_array=az_orig_vals, za_array=za_orig_vals)
# now convert to power beam
power_beam = efield_beam.efield_to_power(inplace=False)
del efield_beam
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals)
data_array_compare = power_beam.data_array
interp_data_array = interp_data_array.reshape(data_array_compare.shape, order='F')
assert np.allclose(data_array_compare, interp_data_array)
# test that interp to every other point returns an object that matches a select
pixel_inds = np.arange(0, power_beam.Npixels, 2)
select_beam = power_beam.select(pixels=pixel_inds, inplace=False)
interp_beam = power_beam.interp(healpix_inds=power_beam.pixel_array[pixel_inds],
healpix_nside=power_beam.nside, new_object=True)
assert select_beam.history != interp_beam.history
interp_beam.history = select_beam.history
assert select_beam == interp_beam
# assert not feeding frequencies gives same answer
interp_data_array2, interp_basis_vector2 = power_beam.interp(az_array=az_orig_vals, za_array=za_orig_vals)
assert np.allclose(interp_data_array, interp_data_array2)
# assert not feeding az_array gives same answer
interp_data_array2, interp_basis_vector2 = power_beam.interp(freq_array=freq_orig_vals)
assert np.allclose(interp_data_array, interp_data_array2)
# test requesting polarization gives the same answer
interp_data_array2, interp_basis_vector2 = power_beam.interp(az_array=az_orig_vals, za_array=za_orig_vals,
polarizations=['yy'])
assert np.allclose(interp_data_array[:, :, 1:2], interp_data_array2[:, :, :1])
# change complex data_array to real data_array and test again
assert power_beam.data_array.dtype == np.complex128
power_beam.data_array = np.abs(power_beam.data_array)
interp_data_array, interp_basis_vector = power_beam.interp(az_array=az_orig_vals,
za_array=za_orig_vals,
freq_array=freq_orig_vals)
data_array_compare = power_beam.data_array
interp_data_array = interp_data_array.reshape(data_array_compare.shape, order='F')
assert np.allclose(data_array_compare, interp_data_array)
# test no inputs equals same answer
interp_data_array2, interp_basis_vector2 = power_beam.interp()
assert np.allclose(interp_data_array, interp_data_array2)
# assert polarization value error
pytest.raises(ValueError, power_beam.interp, az_array=az_orig_vals, za_array=za_orig_vals,
polarizations=['pI'])
# healpix coord exception
power_beam.pixel_coordinate_system = 'foo'
pytest.raises(ValueError, power_beam.interp, az_array=az_orig_vals, za_array=za_orig_vals)


@uvtest.skipIf_no_healpix
def test_to_healpix():
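"""Test converting az/za beams to HEALPix for power and E-field beams, including history and power/E-field consistency checks."""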
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
power_beam.select(axis2_inds=np.where(power_beam.axis2_array <= np.pi / 2.)[0])
power_beam.interpolation_function = 'az_za_simple'
power_beam_healpix = power_beam.to_healpix(inplace=False)
# check that history is updated appropriately
assert power_beam_healpix.history == (
power_beam.history + ' Interpolated from '
+ power_beam.coordinate_system_dict['az_za']['description'] + ' to '
+ power_beam.coordinate_system_dict['healpix']['description']
+ ' using pyuvdata with interpolation_function = az_za_simple.')
hp_obj = HEALPix(nside=power_beam_healpix.nside)
assert power_beam_healpix.Npixels <= hp_obj.npix * 0.55
# Test error if not az_za
power_beam.pixel_coordinate_system = 'sin_zenith'
pytest.raises(ValueError, power_beam.to_healpix)
# Now check Efield interpolation
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
efield_beam.interpolation_function = 'az_za_simple'
interp_then_sq = efield_beam.to_healpix(inplace=False)
interp_then_sq.efield_to_power(calc_cross_pols=False)
# convert to power and then interpolate to compare.
# Don't use power read from file because it has rounding errors that will dominate this comparison
sq_then_interp = efield_beam.efield_to_power(calc_cross_pols=False, inplace=False)
sq_then_interp.to_healpix()
# square then interpolate is different from interpolate then square at a
# higher level than normally allowed in the equality.
# We can live with it for now, may need to improve it later
diff = np.abs(interp_then_sq.data_array - sq_then_interp.data_array)
assert np.max(diff) < 0.5
reldiff = diff / sq_then_interp.data_array
assert np.max(reldiff) < 0.05
# set data_array tolerances higher to test the rest of the object
# tols are (relative, absolute)
tols = [0.05, 0]
sq_then_interp._data_array.tols = tols
# check history changes
interp_history_add = (' Interpolated from '
+ power_beam.coordinate_system_dict['az_za']['description']
+ ' to '
+ power_beam.coordinate_system_dict['healpix']['description']
+ ' using pyuvdata with interpolation_function = az_za_simple.')
sq_history_add = ' Converted from efield to power using pyuvdata.'
assert sq_then_interp.history == efield_beam.history + sq_history_add + interp_history_add
assert interp_then_sq.history == efield_beam.history + interp_history_add + sq_history_add
# now change history on one so we can compare the rest of the object
sq_then_interp.history = efield_beam.history + interp_history_add + sq_history_add
assert sq_then_interp == interp_then_sq


def test_select_axis():
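"""Test selecting along the first and second image axes of a power beam."""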
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
old_history = power_beam.history
# Test selecting on axis1
inds1_to_keep = np.arange(14, 63)
power_beam2 = power_beam.select(axis1_inds=inds1_to_keep, inplace=False)
assert len(inds1_to_keep) == power_beam2.Naxes1
for i in inds1_to_keep:
assert power_beam.axis1_array[i] in power_beam2.axis1_array
for i in np.unique(power_beam2.axis1_array):
assert i in power_beam.axis1_array
assert uvutils._check_histories(old_history + ' Downselected to '
'specific parts of first image axis '
'using pyuvdata.', power_beam2.history)
write_file_beamfits = os.path.join(DATA_PATH, 'test/select_beam.fits')
# test writing beamfits with only one element in axis1
inds_to_keep = [len(inds1_to_keep) + 1]
power_beam2 = power_beam.select(axis1_inds=inds_to_keep, inplace=False)
power_beam2.write_beamfits(write_file_beamfits, clobber=True)
# check for errors associated with indices not included in data
pytest.raises(ValueError, power_beam2.select, axis1_inds=[power_beam.Naxes1 - 1])
# check for warnings and errors associated with unevenly spaced image pixels
power_beam2 = copy.deepcopy(power_beam)
uvtest.checkWarnings(power_beam2.select, [], {'axis1_inds': [0, 5, 6]},
message='Selected values along first image axis are not evenly spaced')
pytest.raises(ValueError, power_beam2.write_beamfits, write_file_beamfits)
# Test selecting on axis2
inds2_to_keep = np.arange(5, 14)
power_beam2 = power_beam.select(axis2_inds=inds2_to_keep, inplace=False)
assert len(inds2_to_keep) == power_beam2.Naxes2
for i in inds2_to_keep:
assert power_beam.axis2_array[i] in power_beam2.axis2_array
for i in np.unique(power_beam2.axis2_array):
assert i in power_beam.axis2_array
assert uvutils._check_histories(old_history + ' Downselected to '
'specific parts of second image axis '
'using pyuvdata.', power_beam2.history)
write_file_beamfits = os.path.join(DATA_PATH, 'test/select_beam.fits')
# test writing beamfits with only one element in axis2
inds_to_keep = [len(inds2_to_keep) + 1]
power_beam2 = power_beam.select(axis2_inds=inds_to_keep, inplace=False)
power_beam2.write_beamfits(write_file_beamfits, clobber=True)
# check for errors associated with indices not included in data
pytest.raises(ValueError, power_beam2.select, axis2_inds=[power_beam.Naxes2 - 1])
# check for warnings and errors associated with unevenly spaced image pixels
power_beam2 = copy.deepcopy(power_beam)
uvtest.checkWarnings(power_beam2.select, [], {'axis2_inds': [0, 5, 6]},
message='Selected values along second image axis are not evenly spaced')
pytest.raises(ValueError, power_beam2.write_beamfits, write_file_beamfits)


def test_select_frequencies():
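"""Test selecting on frequencies and frequency channels of a power beam."""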
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding several times
while power_beam.Nfreqs < 8:
new_beam = copy.deepcopy(power_beam)
new_beam.freq_array = power_beam.freq_array + power_beam.Nfreqs * 1e6
power_beam += new_beam
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
old_history = power_beam.history
freqs_to_keep = power_beam.freq_array[0, np.arange(2, 7)]
power_beam2 = power_beam.select(frequencies=freqs_to_keep, inplace=False)
assert len(freqs_to_keep) == power_beam2.Nfreqs
for f in freqs_to_keep:
assert f in power_beam2.freq_array
for f in np.unique(power_beam2.freq_array):
assert f in freqs_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific frequencies using pyuvdata.',
power_beam2.history)
write_file_beamfits = os.path.join(DATA_PATH, 'test/select_beam.fits')
# test writing beamfits with only one frequency
freqs_to_keep = power_beam.freq_array[0, 5]
power_beam2 = power_beam.select(frequencies=freqs_to_keep, inplace=False)
power_beam2.write_beamfits(write_file_beamfits, clobber=True)
# check for errors associated with frequencies not included in data
pytest.raises(ValueError, power_beam.select, frequencies=[np.max(power_beam.freq_array) + 10])
# check for warnings and errors associated with unevenly spaced frequencies
power_beam2 = copy.deepcopy(power_beam)
uvtest.checkWarnings(power_beam2.select, [],
{'frequencies': power_beam2.freq_array[0, [0, 5, 6]]},
message='Selected frequencies are not evenly spaced')
pytest.raises(ValueError, power_beam2.write_beamfits, write_file_beamfits)
# Test selecting on freq_chans
chans_to_keep = np.arange(2, 7)
power_beam2 = power_beam.select(freq_chans=chans_to_keep, inplace=False)
assert len(chans_to_keep) == power_beam2.Nfreqs
for chan in chans_to_keep:
assert power_beam.freq_array[0, chan] in power_beam2.freq_array
for f in np.unique(power_beam2.freq_array):
assert f in power_beam.freq_array[0, chans_to_keep]
assert uvutils._check_histories(old_history + ' Downselected to '
'specific frequencies using pyuvdata.',
power_beam2.history)
# Test selecting both channels and frequencies
freqs_to_keep = power_beam.freq_array[0, np.arange(6, 8)] # Overlaps with chans
all_chans_to_keep = np.arange(2, 8)
power_beam2 = power_beam.select(frequencies=freqs_to_keep,
freq_chans=chans_to_keep,
inplace=False)
assert len(all_chans_to_keep) == power_beam2.Nfreqs
for chan in all_chans_to_keep:
assert power_beam.freq_array[0, chan] in power_beam2.freq_array
for f in np.unique(power_beam2.freq_array):
assert f in power_beam.freq_array[0, all_chans_to_keep]


def test_select_feeds():
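"""Test selecting on feeds of an E-field beam and checking basis vectors."""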
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# add optional parameters for testing purposes
efield_beam.extra_keywords = {'KEY1': 'test_keyword'}
efield_beam.reference_impedance = 340.
efield_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.loss_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, efield_beam.Nspws, efield_beam.Nfreqs))
old_history = efield_beam.history
feeds_to_keep = ['x']
efield_beam2 = efield_beam.select(feeds=feeds_to_keep, inplace=False)
assert len(feeds_to_keep) == efield_beam2.Nfeeds
for f in feeds_to_keep:
assert f in efield_beam2.feed_array
for f in np.unique(efield_beam2.feed_array):
assert f in feeds_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific feeds using pyuvdata.',
efield_beam2.history)
# check for errors associated with feeds not included in data
pytest.raises(ValueError, efield_beam.select, feeds=['N'])
# check for error with selecting polarizations on efield beams
pytest.raises(ValueError, efield_beam.select, polarizations=[-5, -6])
# Test check basis vectors
efield_beam.basis_vector_array[0, 1, :, :] = 1.0
pytest.raises(ValueError, efield_beam.check)
efield_beam.basis_vector_array[0, 0, :, :] = np.sqrt(0.5)
efield_beam.basis_vector_array[0, 1, :, :] = np.sqrt(0.5)
assert efield_beam.check()
efield_beam.basis_vector_array = None
pytest.raises(ValueError, efield_beam.check)


def test_select_polarizations():
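"""Test selecting on polarizations of a power beam."""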
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol='xx',
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more polarizations for testing by copying and adding several times
while power_beam.Npols < 4:
new_beam = copy.deepcopy(power_beam)
new_beam.polarization_array = power_beam.polarization_array - power_beam.Npols
power_beam += new_beam
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
old_history = power_beam.history
pols_to_keep = [-5, -6]
power_beam2 = power_beam.select(polarizations=pols_to_keep,
inplace=False)
assert len(pols_to_keep) == power_beam2.Npols
for p in pols_to_keep:
assert p in power_beam2.polarization_array
for p in np.unique(power_beam2.polarization_array):
assert p in pols_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific polarizations using pyuvdata.',
power_beam2.history)
# check for errors associated with polarizations not included in data
pytest.raises(ValueError, power_beam.select, polarizations=[-3, -4])
# check for warnings and errors associated with unevenly spaced polarizations
uvtest.checkWarnings(power_beam.select, [], {'polarizations': power_beam.polarization_array[[0, 1, 3]]},
message='Selected polarizations are not evenly spaced')
write_file_beamfits = os.path.join(DATA_PATH, 'test/select_beam.fits')
pytest.raises(ValueError, power_beam.write_beamfits, write_file_beamfits)
# check for error with selecting on feeds on power beams
pytest.raises(ValueError, power_beam.select, feeds=['x'])


def test_select():
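"""Test selecting along several axes at once for power and E-field beams."""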
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(power_beam)
new_beam.freq_array = power_beam.freq_array + power_beam.Nfreqs * 1e6
power_beam += new_beam
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
# now test selecting along all axes at once
old_history = power_beam.history
inds1_to_keep = np.arange(14, 63)
inds2_to_keep = np.arange(5, 14)
freqs_to_keep = [power_beam.freq_array[0, 0]]
pols_to_keep = [-5]
power_beam2 = power_beam.select(axis1_inds=inds1_to_keep,
axis2_inds=inds2_to_keep,
frequencies=freqs_to_keep,
polarizations=pols_to_keep,
inplace=False)
assert len(inds1_to_keep) == power_beam2.Naxes1
for i in inds1_to_keep:
assert power_beam.axis1_array[i] in power_beam2.axis1_array
for i in np.unique(power_beam2.axis1_array):
assert i in power_beam.axis1_array
assert len(inds2_to_keep) == power_beam2.Naxes2
for i in inds2_to_keep:
assert power_beam.axis2_array[i] in power_beam2.axis2_array
for i in np.unique(power_beam2.axis2_array):
assert i in power_beam.axis2_array
assert len(freqs_to_keep) == power_beam2.Nfreqs
for f in freqs_to_keep:
assert f in power_beam2.freq_array
for f in np.unique(power_beam2.freq_array):
assert f in freqs_to_keep
assert len(pols_to_keep) == power_beam2.Npols
for p in pols_to_keep:
assert p in power_beam2.polarization_array
for p in np.unique(power_beam2.polarization_array):
assert p in pols_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific parts of first image axis, '
'parts of second image axis, '
'frequencies, polarizations using pyuvdata.',
power_beam2.history)
# repeat for efield beam
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(efield_beam)
new_beam.freq_array = efield_beam.freq_array + efield_beam.Nfreqs * 1e6
efield_beam += new_beam
# add optional parameters for testing purposes
efield_beam.extra_keywords = {'KEY1': 'test_keyword'}
efield_beam.reference_impedance = 340.
efield_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.loss_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, efield_beam.Nspws, efield_beam.Nfreqs))
feeds_to_keep = ['x']
efield_beam2 = efield_beam.select(axis1_inds=inds1_to_keep,
axis2_inds=inds2_to_keep,
frequencies=freqs_to_keep,
feeds=feeds_to_keep,
inplace=False)
assert len(inds1_to_keep) == efield_beam2.Naxes1
for i in inds1_to_keep:
assert efield_beam.axis1_array[i] in efield_beam2.axis1_array
for i in np.unique(efield_beam2.axis1_array):
assert i in efield_beam.axis1_array
assert len(inds2_to_keep) == efield_beam2.Naxes2
for i in inds2_to_keep:
assert efield_beam.axis2_array[i] in efield_beam2.axis2_array
for i in np.unique(efield_beam2.axis2_array):
assert i in efield_beam.axis2_array
assert len(freqs_to_keep) == efield_beam2.Nfreqs
for f in freqs_to_keep:
assert f in efield_beam2.freq_array
for f in np.unique(efield_beam2.freq_array):
assert f in freqs_to_keep
assert len(feeds_to_keep) == efield_beam2.Nfeeds
for f in feeds_to_keep:
assert f in efield_beam2.feed_array
for f in np.unique(efield_beam2.feed_array):
assert f in feeds_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific parts of first image axis, '
'parts of second image axis, '
'frequencies, feeds using pyuvdata.',
efield_beam2.history)


def test_add():
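"""Test combining UVBeam objects along each axis, including out-of-order adds, warnings and failure modes."""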
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(power_beam)
new_beam.freq_array = power_beam.freq_array + power_beam.Nfreqs * 1e6
power_beam += new_beam
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
# Add along first image axis
beam1 = power_beam.select(axis1_inds=np.arange(0, 180), inplace=False)
beam2 = power_beam.select(axis1_inds=np.arange(180, 360), inplace=False)
beam1 += beam2
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific parts of '
'first image axis using pyuvdata. '
'Combined data along first image axis '
'using pyuvdata.', beam1.history)
beam1.history = power_beam.history
assert beam1 == power_beam
# Out of order - axis1
beam1 = power_beam.select(axis1_inds=np.arange(180, 360), inplace=False)
beam2 = power_beam.select(axis1_inds=np.arange(0, 180), inplace=False)
beam1 += beam2
beam1.history = power_beam.history
assert beam1 == power_beam
# Add along second image axis
beam1 = power_beam.select(axis2_inds=np.arange(0, 90), inplace=False)
beam2 = power_beam.select(axis2_inds=np.arange(90, 181), inplace=False)
beam1 += beam2
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific parts of '
'second image axis using pyuvdata. '
'Combined data along second image axis '
'using pyuvdata.', beam1.history)
beam1.history = power_beam.history
assert beam1 == power_beam
# Out of order - axis2
beam1 = power_beam.select(axis2_inds=np.arange(90, 181), inplace=False)
beam2 = power_beam.select(axis2_inds=np.arange(0, 90), inplace=False)
beam1 += beam2
beam1.history = power_beam.history
assert beam1 == power_beam
# Add frequencies
beam1 = power_beam.select(freq_chans=0, inplace=False)
beam2 = power_beam.select(freq_chans=1, inplace=False)
beam1 += beam2
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific frequencies '
'using pyuvdata. Combined data along '
'frequency axis using pyuvdata.',
beam1.history)
beam1.history = power_beam.history
assert beam1 == power_beam
# Out of order - freqs
beam1 = power_beam.select(freq_chans=1, inplace=False)
beam2 = power_beam.select(freq_chans=0, inplace=False)
beam1 += beam2
beam1.history = power_beam.history
assert beam1 == power_beam
# Add polarizations
beam1 = power_beam.select(polarizations=-5, inplace=False)
beam2 = power_beam.select(polarizations=-6, inplace=False)
beam1 += beam2
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific polarizations '
'using pyuvdata. Combined data along '
'polarization axis using pyuvdata.',
beam1.history)
beam1.history = power_beam.history
assert beam1 == power_beam
# Out of order - pols
beam1 = power_beam.select(polarizations=-6, inplace=False)
beam2 = power_beam.select(polarizations=-5, inplace=False)
beam1 += beam2
beam1.history = power_beam.history
assert beam1 == power_beam
# Add feeds
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(efield_beam)
new_beam.freq_array = efield_beam.freq_array + efield_beam.Nfreqs * 1e6
efield_beam += new_beam
# add optional parameters for testing purposes
efield_beam.extra_keywords = {'KEY1': 'test_keyword'}
efield_beam.reference_impedance = 340.
efield_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.loss_array = np.random.normal(50.0, 5, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(efield_beam.Nspws, efield_beam.Nfreqs))
efield_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, efield_beam.Nspws, efield_beam.Nfreqs))
beam1 = efield_beam.select(feeds=efield_beam.feed_array[0], inplace=False)
beam2 = efield_beam.select(feeds=efield_beam.feed_array[1], inplace=False)
beam1 += beam2
assert uvutils._check_histories(efield_beam.history
+ ' Downselected to specific feeds '
'using pyuvdata. Combined data along '
'feed axis using pyuvdata.',
beam1.history)
beam1.history = efield_beam.history
assert beam1 == efield_beam
# Out of order - feeds
beam1 = efield_beam.select(feeds=efield_beam.feed_array[1], inplace=False)
beam2 = efield_beam.select(feeds=efield_beam.feed_array[0], inplace=False)
beam1 += beam2
beam1.history = efield_beam.history
assert beam1 == efield_beam
# Add multiple axes
beam_ref = copy.deepcopy(power_beam)
beam1 = power_beam.select(axis1_inds=np.arange(0, power_beam.Naxes1 // 2),
polarizations=power_beam.polarization_array[0],
inplace=False)
beam2 = power_beam.select(axis1_inds=np.arange(power_beam.Naxes1 // 2,
power_beam.Naxes1),
polarizations=power_beam.polarization_array[1],
inplace=False)
beam1 += beam2
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific parts of '
'first image axis, polarizations using '
'pyuvdata. Combined data along first '
'image, polarization axis using pyuvdata.',
beam1.history)
# Zero out missing data in reference object
beam_ref.data_array[:, :, 0, :, :, power_beam.Naxes1 // 2:] = 0.0
beam_ref.data_array[:, :, 1, :, :, :power_beam.Naxes1 // 2] = 0.0
beam1.history = power_beam.history
assert beam1 == beam_ref
# Another combo with efield
beam_ref = copy.deepcopy(efield_beam)
beam1 = efield_beam.select(axis1_inds=np.arange(0, efield_beam.Naxes1 // 2),
axis2_inds=np.arange(0, efield_beam.Naxes2 // 2),
inplace=False)
beam2 = efield_beam.select(axis1_inds=np.arange(efield_beam.Naxes1 // 2,
efield_beam.Naxes1),
axis2_inds=np.arange(efield_beam.Naxes2 // 2,
efield_beam.Naxes2),
inplace=False)
beam1 += beam2
assert uvutils._check_histories(efield_beam.history
+ ' Downselected to specific parts of '
'first image axis, parts of second '
'image axis using pyuvdata. Combined '
'data along first image, second image '
'axis using pyuvdata.',
beam1.history)
# Zero out missing data in reference object
beam_ref.data_array[:, :, :, :, :efield_beam.Naxes2 // 2,
efield_beam.Naxes1 // 2:] = 0.0
beam_ref.data_array[:, :, :, :, efield_beam.Naxes2 // 2:,
:efield_beam.Naxes1 // 2] = 0.0
beam_ref.basis_vector_array[:, :, :efield_beam.Naxes2 // 2,
efield_beam.Naxes1 // 2:] = 0.0
beam_ref.basis_vector_array[:, :, efield_beam.Naxes2 // 2:,
:efield_beam.Naxes1 // 2] = 0.0
beam1.history = efield_beam.history
assert beam1 == beam_ref
# Check warnings
# generate more frequencies for testing by copying and adding several times
while power_beam.Nfreqs < 8:
new_beam = copy.deepcopy(power_beam)
new_beam.freq_array = power_beam.freq_array + power_beam.Nfreqs * 1e6
power_beam += new_beam
beam1 = power_beam.select(freq_chans=np.arange(0, 4), inplace=False)
beam2 = power_beam.select(freq_chans=np.arange(5, 8), inplace=False)
uvtest.checkWarnings(beam1.__add__, [beam2],
message='Combined frequencies are not evenly spaced')
# generate more polarizations for testing by copying and adding several times
while power_beam.Npols < 4:
new_beam = copy.deepcopy(power_beam)
new_beam.polarization_array = power_beam.polarization_array - power_beam.Npols
power_beam += new_beam
power_beam.receiver_temperature_array = np.ones((1, 8))
beam1 = power_beam.select(polarizations=power_beam.polarization_array[0:2],
inplace=False)
beam2 = power_beam.select(polarizations=power_beam.polarization_array[3],
inplace=False)
uvtest.checkWarnings(beam1.__iadd__, [beam2],
message='Combined polarizations are not evenly spaced')
beam1 = power_beam.select(polarizations=power_beam.polarization_array[0:2],
inplace=False)
beam2 = power_beam.select(polarizations=power_beam.polarization_array[2:3],
inplace=False)
beam2.receiver_temperature_array = None
assert beam1.receiver_temperature_array is not None
uvtest.checkWarnings(beam1.__iadd__, [beam2],
message=['Only one of the UVBeam objects being combined '
'has optional parameter'])
assert beam1.receiver_temperature_array is None
# Combining histories
beam1 = power_beam.select(polarizations=power_beam.polarization_array[0:2], inplace=False)
beam2 = power_beam.select(polarizations=power_beam.polarization_array[2:4], inplace=False)
beam2.history += ' testing the history. Read/written with pyuvdata'
beam1 += beam2
assert uvutils._check_histories(power_beam.history
+ ' Downselected to specific polarizations '
'using pyuvdata. Combined data along '
'polarization axis using pyuvdata. '
'testing the history.',
beam1.history)
beam1.history = power_beam.history
assert beam1 == power_beam
# ------------------------
# Test failure modes of add function
# Wrong class
beam1 = copy.deepcopy(power_beam)
pytest.raises(ValueError, beam1.__iadd__, np.zeros(5))
params_to_change = {'beam_type': 'efield', 'data_normalization': 'solid_angle',
'telescope_name': 'foo', 'feed_name': 'foo',
'feed_version': 'v12', 'model_name': 'foo',
'model_version': 'v12', 'pixel_coordinate_system': 'sin_zenith',
'Naxes_vec': 3, 'nside': 16, 'ordering': 'nested'}
beam1 = power_beam.select(freq_chans=0, inplace=False)
for param, value in params_to_change.items():
beam2 = power_beam.select(freq_chans=1, inplace=False)
setattr(beam2, param, value)
pytest.raises(ValueError, beam1.__iadd__, beam2)
# Overlapping data
beam2 = copy.deepcopy(power_beam)
pytest.raises(ValueError, beam1.__iadd__, beam2)


@uvtest.skipIf_no_healpix
def test_healpix():
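"""Test HEALPix-specific selects, adds and beam-area calculations, grouped to minimize slow to_healpix calls."""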
# put all the testing on healpix in this one function to minimize slow calls
# to uvbeam.to_healpix()
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(power_beam)
new_beam.freq_array = power_beam.freq_array + power_beam.Nfreqs * 1e6
power_beam += new_beam
# add optional parameters for testing purposes
power_beam.extra_keywords = {'KEY1': 'test_keyword'}
power_beam.reference_impedance = 340.
power_beam.receiver_temperature_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.loss_array = np.random.normal(50.0, 5, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.mismatch_array = np.random.normal(0.0, 1.0, size=(power_beam.Nspws, power_beam.Nfreqs))
power_beam.s_parameters = np.random.normal(0.0, 0.3, size=(4, power_beam.Nspws, power_beam.Nfreqs))
power_beam.interpolation_function = 'az_za_simple'
power_beam_healpix = power_beam.to_healpix(inplace=False)
# test that Npixels make sense
n_max_pix = power_beam.Naxes1 * power_beam.Naxes2
assert power_beam_healpix.Npixels <= n_max_pix
# -----------------------
# test selecting on pixels
old_history = power_beam_healpix.history
pixels_to_keep = np.arange(31, 184)
power_beam_healpix2 = power_beam_healpix.select(pixels=pixels_to_keep, inplace=False)
assert len(pixels_to_keep) == power_beam_healpix2.Npixels
for pi in pixels_to_keep:
assert pi in power_beam_healpix2.pixel_array
for pi in np.unique(power_beam_healpix2.pixel_array):
assert pi in pixels_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific healpix pixels using pyuvdata.',
power_beam_healpix2.history)
write_file_beamfits = os.path.join(DATA_PATH, 'test/select_beam.fits')
# test writing beamfits with only one pixel
pixels_to_keep = [43]
power_beam_healpix2 = power_beam_healpix.select(pixels=pixels_to_keep, inplace=False)
power_beam_healpix2.write_beamfits(write_file_beamfits, clobber=True)
# check for errors associated with pixels not included in data
pytest.raises(ValueError, power_beam_healpix.select,
pixels=[12 * power_beam_healpix.nside**2 + 10])
# test writing beamfits with non-contiguous pixels
pixels_to_keep = np.arange(2, 150, 4)
power_beam_healpix2 = power_beam_healpix.select(pixels=pixels_to_keep, inplace=False)
power_beam_healpix2.write_beamfits(write_file_beamfits, clobber=True)
# check for errors selecting pixels on non-healpix beams
pytest.raises(ValueError, power_beam.select, pixels=pixels_to_keep)
# -----------------
# check for errors selecting axis1_inds on healpix beams
inds1_to_keep = np.arange(14, 63)
pytest.raises(ValueError, power_beam_healpix.select, axis1_inds=inds1_to_keep)
# check for errors selecting axis2_inds on healpix beams
inds2_to_keep = np.arange(5, 14)
pytest.raises(ValueError, power_beam_healpix.select, axis2_inds=inds2_to_keep)
# ------------------------
# test selecting along all axes at once for healpix beams
freqs_to_keep = [power_beam_healpix.freq_array[0, 0]]
pols_to_keep = [-5]
power_beam_healpix2 = power_beam_healpix.select(pixels=pixels_to_keep,
frequencies=freqs_to_keep,
polarizations=pols_to_keep,
inplace=False)
assert len(pixels_to_keep) == power_beam_healpix2.Npixels
for pi in pixels_to_keep:
assert pi in power_beam_healpix2.pixel_array
for pi in np.unique(power_beam_healpix2.pixel_array):
assert pi in pixels_to_keep
assert len(freqs_to_keep) == power_beam_healpix2.Nfreqs
for f in freqs_to_keep:
assert f in power_beam_healpix2.freq_array
for f in np.unique(power_beam_healpix2.freq_array):
assert f in freqs_to_keep
assert len(pols_to_keep) == power_beam_healpix2.Npols
for p in pols_to_keep:
assert p in power_beam_healpix2.polarization_array
for p in np.unique(power_beam_healpix2.polarization_array):
assert p in pols_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific healpix pixels, frequencies, '
'polarizations using pyuvdata.',
power_beam_healpix2.history)
# repeat for efield beam
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1', feed_pol=['x'],
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
# generate more frequencies for testing by copying and adding
new_beam = copy.deepcopy(efield_beam)
new_beam.freq_array = efield_beam.freq_array + efield_beam.Nfreqs * 1e6
efield_beam += new_beam
efield_beam.interpolation_function = 'az_za_simple'
efield_beam.to_healpix()
old_history = efield_beam.history
freqs_to_keep = np.array([efield_beam.freq_array[0, 0]])
feeds_to_keep = ['x']
efield_beam2 = efield_beam.select(pixels=pixels_to_keep,
frequencies=freqs_to_keep,
feeds=feeds_to_keep,
inplace=False)
assert len(pixels_to_keep) == efield_beam2.Npixels
for pi in pixels_to_keep:
assert pi in efield_beam2.pixel_array
for pi in np.unique(efield_beam2.pixel_array):
assert pi in pixels_to_keep
assert freqs_to_keep.size == efield_beam2.Nfreqs
for f in freqs_to_keep:
assert f in efield_beam2.freq_array
for f in np.unique(efield_beam2.freq_array):
assert f in freqs_to_keep
assert len(feeds_to_keep) == efield_beam2.Nfeeds
for f in feeds_to_keep:
assert f in efield_beam2.feed_array
for f in np.unique(efield_beam2.feed_array):
assert f in feeds_to_keep
assert uvutils._check_histories(old_history + ' Downselected to '
'specific healpix pixels, frequencies, '
'feeds using pyuvdata.',
efield_beam2.history)
# -------------------
# Test adding a different combo with healpix
beam_ref = copy.deepcopy(power_beam_healpix)
beam1 = power_beam_healpix.select(
pixels=power_beam_healpix.pixel_array[0:power_beam_healpix.Npixels // 2],
freq_chans=0, inplace=False)
beam2 = power_beam_healpix.select(
pixels=power_beam_healpix.pixel_array[power_beam_healpix.Npixels // 2:],
freq_chans=1, inplace=False)
beam1 += beam2
assert uvutils._check_histories(power_beam_healpix.history
+ ' Downselected to specific healpix '
'pixels, frequencies using pyuvdata. '
'Combined data along healpix pixel, '
'frequency axis using pyuvdata.',
beam1.history)
# Zero out missing data in reference object
beam_ref.data_array[:, :, :, 0, power_beam_healpix.Npixels // 2:] = 0.0
beam_ref.data_array[:, :, :, 1, :power_beam_healpix.Npixels // 2] = 0.0
beam1.history = power_beam_healpix.history
assert beam1 == beam_ref
# Test adding another combo with efield
beam_ref = copy.deepcopy(efield_beam)
beam1 = efield_beam.select(freq_chans=0, feeds=efield_beam.feed_array[0],
inplace=False)
beam2 = efield_beam.select(freq_chans=1, feeds=efield_beam.feed_array[1],
inplace=False)
beam1 += beam2
assert uvutils._check_histories(efield_beam.history
+ ' Downselected to specific frequencies, '
'feeds using pyuvdata. Combined data '
'along frequency, feed axis using pyuvdata.',
beam1.history)
# Zero out missing data in reference object
beam_ref.data_array[:, :, 1, 0, :] = 0.0
beam_ref.data_array[:, :, 0, 1, :] = 0.0
beam1.history = efield_beam.history
assert beam1 == beam_ref
# Add without inplace
beam1 = efield_beam.select(pixels=efield_beam.pixel_array[0:efield_beam.Npixels // 2],
inplace=False)
beam2 = efield_beam.select(pixels=efield_beam.pixel_array[efield_beam.Npixels // 2:],
inplace=False)
beam1 = beam1 + beam2
assert uvutils._check_histories(efield_beam.history
+ ' Downselected to specific healpix pixels '
'using pyuvdata. Combined data '
'along healpix pixel axis using pyuvdata.',
beam1.history)
beam1.history = efield_beam.history
assert beam1 == efield_beam
# ---------------
# Test error: adding overlapping data with healpix
beam1 = copy.deepcopy(power_beam_healpix)
beam2 = copy.deepcopy(power_beam_healpix)
pytest.raises(ValueError, beam1.__iadd__, beam2)
# ---------------
# Test beam area methods
# Check that non-peak normalizations error
pytest.raises(ValueError, power_beam_healpix.get_beam_area)
pytest.raises(ValueError, power_beam_healpix.get_beam_sq_area)
healpix_norm = copy.deepcopy(power_beam_healpix)
healpix_norm.data_normalization = 'solid_angle'
pytest.raises(ValueError, healpix_norm.get_beam_area)
pytest.raises(ValueError, healpix_norm.get_beam_sq_area)
# change it back to 'physical'
healpix_norm.data_normalization = 'physical'
# change it to peak for rest of checks
healpix_norm.peak_normalize()
# Check sizes of output
numfreqs = healpix_norm.freq_array.shape[-1]
beam_int = healpix_norm.get_beam_area(pol='xx')
beam_sq_int = healpix_norm.get_beam_sq_area(pol='xx')
assert beam_int.shape[0] == numfreqs
assert beam_sq_int.shape[0] == numfreqs
# Check for the case of a uniform beam over the whole sky
hp_obj = HEALPix(nside=healpix_norm.nside)
dOmega = hp_obj.pixel_area.to('steradian').value
npix = healpix_norm.Npixels
healpix_norm.data_array = np.ones_like(healpix_norm.data_array)
assert np.allclose(np.sum(healpix_norm.get_beam_area(pol='xx')), numfreqs * npix * dOmega)
healpix_norm.data_array = 2. * np.ones_like(healpix_norm.data_array)
assert np.allclose(np.sum(healpix_norm.get_beam_sq_area(pol='xx')), numfreqs * 4. * npix * dOmega)
# check XX and YY beam areas work and match to within 5 sigfigs
XX_area = healpix_norm.get_beam_area('XX')
xx_area = healpix_norm.get_beam_area('xx')
assert np.allclose(xx_area, XX_area)
YY_area = healpix_norm.get_beam_area('YY')
assert np.allclose(YY_area / XX_area, np.ones(numfreqs))
# nt.assert_almost_equal(YY_area / XX_area, 1.0, places=5)
XX_area = healpix_norm.get_beam_sq_area("XX")
YY_area = healpix_norm.get_beam_sq_area("YY")
assert np.allclose(YY_area / XX_area, np.ones(numfreqs))
# nt.assert_almost_equal(YY_area / XX_area, 1.0, places=5)
# Check that if pseudo-Stokes I (pI) is in the beam polarization_array, it just uses it
healpix_norm.polarization_array = [1, 2]
# nt.assert_almost_equal(np.sum(healpix_norm.get_beam_area()), 2. * numfreqs * npix * dOmega)
# nt.assert_almost_equal(np.sum(healpix_norm.get_beam_sq_area()), 4. * numfreqs * npix * dOmega)
# Check error if desired pol is allowed but isn't in the polarization_array
pytest.raises(ValueError, healpix_norm.get_beam_area, pol='xx')
pytest.raises(ValueError, healpix_norm.get_beam_sq_area, pol='xx')
# Check polarization error
healpix_norm.polarization_array = [9, 18, 27, -4]
pytest.raises(ValueError, healpix_norm.get_beam_area, pol='xx')
pytest.raises(ValueError, healpix_norm.get_beam_sq_area, pol='xx')
healpix_norm_fullpol = efield_beam.efield_to_power(inplace=False)
healpix_norm_fullpol.peak_normalize()
XX_area = healpix_norm_fullpol.get_beam_sq_area("XX")
YY_area = healpix_norm_fullpol.get_beam_sq_area("YY")
XY_area = healpix_norm_fullpol.get_beam_sq_area("XY")
YX_area = healpix_norm_fullpol.get_beam_sq_area("YX")
    # check that the XY beam area equals the YX beam area
assert np.allclose(XY_area, YX_area)
# check if XY/YX beam area is less than XX/YY beam area
assert np.all(np.less(XY_area, XX_area))
assert np.all(np.less(XY_area, YY_area))
assert np.all(np.less(YX_area, XX_area))
assert np.all(np.less(YX_area, YY_area))
# Check if power is scalar
healpix_vec_norm = efield_beam.efield_to_power(keep_basis_vector=True,
calc_cross_pols=False,
inplace=False)
healpix_vec_norm.peak_normalize()
pytest.raises(ValueError, healpix_vec_norm.get_beam_area)
pytest.raises(ValueError, healpix_vec_norm.get_beam_sq_area)
# Check only power beams accepted
pytest.raises(ValueError, efield_beam.get_beam_area)
pytest.raises(ValueError, efield_beam.get_beam_sq_area)
# check pseudo-Stokes parameters
efield_beam = UVBeam()
efield_beam.read_cst_beam(cst_files[0], beam_type='efield', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1',
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
efield_beam.interpolation_function = 'az_za_simple'
efield_beam.to_healpix()
efield_beam.efield_to_pstokes()
efield_beam.peak_normalize()
pI_area = efield_beam.get_beam_sq_area("pI")
pQ_area = efield_beam.get_beam_sq_area("pQ")
pU_area = efield_beam.get_beam_sq_area("pU")
pV_area = efield_beam.get_beam_sq_area("pV")
assert np.all(np.less(pQ_area, pI_area))
assert np.all(np.less(pU_area, pI_area))
assert np.all(np.less(pV_area, pI_area))
    # check backwards compatibility with pstokes nomenclature and int polnum
I_area = efield_beam.get_beam_area('I')
pI_area = efield_beam.get_beam_area('pI')
area1 = efield_beam.get_beam_area(1)
assert np.allclose(I_area, pI_area)
assert np.allclose(I_area, area1)
    # check that pI requires a pseudo-Stokes beam and xx requires a linear-pol power beam
pytest.raises(ValueError, healpix_vec_norm.get_beam_sq_area, 'pI')
pytest.raises(ValueError, efield_beam.get_beam_sq_area, 'xx')
def test_get_beam_functions():
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1',
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
pytest.raises(AssertionError, power_beam._get_beam, 'xx')
# Check only healpix accepted (HEALPix checks are in test_healpix)
# change data_normalization to peak for rest of checks
power_beam.peak_normalize()
pytest.raises(ValueError, power_beam.get_beam_area)
pytest.raises(ValueError, power_beam.get_beam_sq_area)
if healpix_installed:
power_beam = UVBeam()
power_beam.read_cst_beam(cst_files[0], beam_type='power', frequency=150e6,
telescope_name='TEST', feed_name='bob',
feed_version='0.1',
model_name='E-field pattern - Rigging height 4.9m',
model_version='1.0')
power_beam.interpolation_function = 'az_za_simple'
power_beam.to_healpix()
power_beam.peak_normalize()
power_beam._get_beam('xx')
pytest.raises(ValueError, power_beam._get_beam, 4)
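The uniform-beam assertions above rest on the discrete approximation of the beam integral over HEALPix pixels, area ≈ sum over pixels of |B_pix|^2 * dOmega with a constant per-pixel solid angle. A minimal numpy sketch of that bookkeeping (an illustration of the quantity the tests compare against, not pyuvdata's implementation; the shapes and the nside=2 pixelization are assumptions):

import numpy as np

def discrete_beam_sq_area(beam_vals, pixel_area):
    """Approximate the beam-squared integral per frequency.

    beam_vals: (Nfreqs, Npixels) peak-normalized power-beam values on
    HEALPix pixels; pixel_area: solid angle of one pixel in steradians.
    """
    return np.sum(np.abs(beam_vals) ** 2, axis=-1) * pixel_area

# A flat beam of amplitude 2 over npix pixels gives 4 * npix * dOmega per
# frequency, matching the healpix_norm assertion above.
nfreqs, npix = 2, 48                      # nside=2 full sky, for illustration
d_omega = 4 * np.pi / npix
flat_beam = 2.0 * np.ones((nfreqs, npix))
assert np.allclose(discrete_beam_sq_area(flat_beam, d_omega), 4 * npix * d_omega)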
| 48.813609
| 116
| 0.634776
|
9ea09c29adba3bedefbdfbaa8ea2b295d98d220c
| 3,682
|
py
|
Python
|
restalchemy/validators.py
|
restalchemy/restalchemy
|
dcbe5312832f35ad4fa7262516c32579e0acd214
|
[
"0BSD"
] | 1
|
2019-01-16T18:40:01.000Z
|
2019-01-16T18:40:01.000Z
|
restalchemy/validators.py
|
restalchemy/restalchemy
|
dcbe5312832f35ad4fa7262516c32579e0acd214
|
[
"0BSD"
] | null | null | null |
restalchemy/validators.py
|
restalchemy/restalchemy
|
dcbe5312832f35ad4fa7262516c32579e0acd214
|
[
"0BSD"
] | null | null | null |
from datetime import datetime
from pyramid.config import Configurator
from restalchemy.exceptions import AttributeWrong
from sqlalchemy import DECIMAL, DateTime, Enum, Float, Integer, String, event
from .model import RestalchemyBase
def validate_int(column, value):
if isinstance(value, str):
try:
value = int(value)
except ValueError:
raise AttributeWrong(column.key, f'"{value}" is not a valid number')
if type(value) is not int: # isinstance doesn't work because bool is subclass of int
        raise AttributeWrong(column.key, f'"{value}" has to be a number')
return value
def validate_float(column, value):
if isinstance(value, str):
try:
value = float(value)
except ValueError:
raise AttributeWrong(column.key, f'"{value}" is not a valid float')
    if type(value) is not float:  # strict type check: ints and bools are rejected, mirroring validate_int
raise AttributeWrong(column.key, f'"{value}" has to be a float')
return value
def validate_string(column, value):
if not isinstance(value, str):
raise AttributeWrong(column.key, f'"{value}" is not a string')
return value
def validate_enum(column, value):
enums = column.type.enums
if value not in enums:
raise AttributeWrong(column.key, f'"{value}" is not one of: {", ".join(e for e in enums)}.')
return value
def validate_datetime(column, value):
if isinstance(value, datetime):
return value
if not isinstance(value, str):
raise AttributeWrong(column.key, "Datetime must be a string")
if value == "0000-00-00T00:00:00": # mysql allows 0000-00-00 dates for invalid dates
return value
orig_value = value
# Allow ' ' or 'T' as date-time separator and only allow UTC timezone ('Z' or '+0000')
value = value.replace("T", " ").replace("Z", "+")
value, _, tz = value.partition("+")
if tz.strip("0:") != "":
raise AttributeWrong(column.key, "Only UTC (+0000) datetimes supported")
_, _, microseconds = value.partition(".")
# We (or better, Python) only supports microseconds (6 digits)
if len(microseconds) > 6:
value = value[: -(len(microseconds) - 6)]
try:
return datetime.strptime(value, "%Y-%m-%d %H:%M:%S.%f")
except ValueError:
pass
try:
return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
except ValueError:
pass
try:
return datetime.strptime(value, "%Y-%m-%d")
except ValueError:
raise AttributeWrong(column.key, f'"{orig_value}" is not a valid datetime')
validators = {
DateTime: validate_datetime,
DECIMAL: validate_float,
Float: validate_float,
Integer: validate_int,
String: validate_string,
Enum: validate_enum,
}
def validate(value, column):
    """Validate and coerce `value` according to the SQLAlchemy type of `column`."""
try:
validator = validators.get(column.type.__class__)
except Exception:
return value
if validator and value is not None:
return validator(column, value)
return value
def includeme(config: Configurator):
# This event is called whenever an attribute on a class is instrumented
@event.listens_for(RestalchemyBase, "attribute_instrument")
def configure_listener(class_, key, inst):
if not hasattr(inst.property, "columns"):
return
# This event is called whenever a "set"
# occurs on that instrumented attribute
@event.listens_for(inst, "set", retval=True)
def set_(instance, value, oldvalue, initiator):
return validate(value, inst.property.columns[0])
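A quick usage sketch of the validators above, with a throwaway SQLAlchemy column (the table and values are invented for illustration; only the validate and AttributeWrong names from this module are assumed):

from sqlalchemy import Column, Integer, MetaData, Table

from restalchemy.exceptions import AttributeWrong
from restalchemy.validators import validate

demo = Table("demo", MetaData(), Column("age", Integer))
age_col = demo.c.age

assert validate("42", age_col) == 42      # numeric strings are coerced to int
assert validate(None, age_col) is None    # None passes through untouched
try:
    validate(True, age_col)               # bools are rejected despite bool subclassing int
except AttributeWrong as exc:
    print(exc)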
| 30.941176
| 100
| 0.652091
|
115b60349ef93e835e9186c49a97c3d5585d1abe
| 2,788
|
py
|
Python
|
tests/test_metadata.py
|
adrien-berchet/sphinx-bluebrain-theme
|
8ecab402e8ba09da45dd423de9395173ddca7337
|
[
"MIT"
] | 2
|
2020-08-08T18:47:51.000Z
|
2021-07-23T13:56:51.000Z
|
tests/test_metadata.py
|
adrien-berchet/sphinx-bluebrain-theme
|
8ecab402e8ba09da45dd423de9395173ddca7337
|
[
"MIT"
] | 15
|
2020-04-09T13:24:12.000Z
|
2022-03-29T08:24:45.000Z
|
tests/test_metadata.py
|
adrien-berchet/sphinx-bluebrain-theme
|
8ecab402e8ba09da45dd423de9395173ddca7337
|
[
"MIT"
] | 2
|
2021-04-22T08:15:13.000Z
|
2021-12-22T08:23:37.000Z
|
"""Tests for metadata generation utilities."""
from contextlib import contextmanager
import json
import os
import shutil
import sys
import tempfile
from nose import tools as nt # pylint: disable=import-error
from sphinx_bluebrain_theme.utils import metadata
@contextmanager
def setup_tempdir(prefix):
"""Create a temporary directory which will be cleaned up."""
temp_dir = tempfile.mkdtemp(prefix=prefix)
try:
yield temp_dir
finally:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def test_temp_change_working_directory():
"""Test changing the working directory temporarily."""
current_wd = os.getcwd()
with metadata.change_cwd("/"):
nt.eq_(os.getcwd(), "/")
nt.eq_(os.getcwd(), current_wd)
def test_write_metadata_files():
"""Test that the metadata files are written and their content is correct."""
with setup_tempdir("docs-path") as docs_path:
metadata_dict = {"name": "test-data"}
metadata.write_metadata(metadata_dict, docs_path)
md_file = os.path.join(docs_path, "metadata.md")
json_file = os.path.join(docs_path, "metadata.json")
nt.assert_true(os.path.isfile(md_file))
nt.assert_true(os.path.isfile(json_file))
with open(md_file, encoding="utf8") as file_:
content = file_.read()
expect = """---
name: test-data
---
"""
nt.eq_(content, expect)
with open(json_file, encoding="utf8") as file_:
content = file_.read()
expect = """{"name": "test-data"}"""
nt.eq_(content, expect)
def test_add_to_path():
"""Test using a context manager to temporarily add to the system path."""
path = "/path/to/nowhere"
current_path = list(sys.path)
nt.assert_not_equal(sys.path[0], path)
with metadata.add_to_path(path):
nt.eq_(sys.path[0], path)
nt.assert_not_equal(sys.path[0], path)
nt.eq_(current_path, sys.path)
def test_get_metadata_from_json():
"""Test getting metadata from a json file."""
output = {
"name": "name",
"version": "version",
"description": "description",
"homepage": "homepage",
"repository": {"url": "repository"},
"bugs": {"url": "issuesurl"},
"license": "license",
"author": "maintainers",
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as fd:
fd.write(json.dumps(output))
fd.flush()
# get the metadata
md = metadata.get_metadata_from_json(fd.name)
# check contributors is blank
nt.eq_(md["contributors"], "")
del md["contributors"]
# check all values are the same as their keys
for k, v in md.items():
nt.eq_(k, v)
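For orientation, the write_metadata behaviour these tests pin down can be reproduced with a few lines like the following (a minimal sketch inferred from the expected file contents, not sphinx_bluebrain_theme's actual implementation):

import json
import os

def write_metadata_sketch(metadata_dict, docs_path):
    """Write metadata.md (YAML front matter) and metadata.json into docs_path."""
    lines = ["---"] + [f"{k}: {v}" for k, v in metadata_dict.items()] + ["---", ""]
    with open(os.path.join(docs_path, "metadata.md"), "w", encoding="utf8") as fd:
        fd.write("\n".join(lines))
    with open(os.path.join(docs_path, "metadata.json"), "w", encoding="utf8") as fd:
        json.dump(metadata_dict, fd)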
| 27.067961
| 80
| 0.624821
|
178649b04ee976918cde7294b93c9476e15e6ba6
| 1,858
|
py
|
Python
|
core/migrations/0002_auto_20170524_1929.py
|
zarathon/gddmaker
|
58e185e6baa10bdcb863acdf3b946297adeb65c8
|
[
"MIT"
] | 2
|
2017-09-25T18:12:55.000Z
|
2018-05-25T09:48:29.000Z
|
core/migrations/0002_auto_20170524_1929.py
|
zarathon/gddmaker
|
58e185e6baa10bdcb863acdf3b946297adeb65c8
|
[
"MIT"
] | null | null | null |
core/migrations/0002_auto_20170524_1929.py
|
zarathon/gddmaker
|
58e185e6baa10bdcb863acdf3b946297adeb65c8
|
[
"MIT"
] | 1
|
2019-09-25T23:25:27.000Z
|
2019-09-25T23:25:27.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 19:29
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='character',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='character',
name='name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='game',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='level',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='level',
name='name',
field=models.CharField(max_length=80),
),
migrations.AlterField(
model_name='level',
name='script',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='mechanic',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='mechanic',
name='name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='media',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='media',
name='name',
field=models.CharField(max_length=255),
),
]
| 27.731343
| 51
| 0.542519
|
fe6cb8ccbe6293f6e11d2e3e9d068fb53f98e712
| 92
|
py
|
Python
|
web2py/parameters_8000.py
|
seadsystem/website
|
d7e8110c012d94c494895fde9951eef728266798
|
[
"MIT"
] | 5
|
2016-03-20T07:07:17.000Z
|
2018-04-12T16:34:12.000Z
|
web2py/parameters_8000.py
|
grant523/website
|
1b5351e638a15a1c0c68e55f9162d1b8e62974bb
|
[
"MIT"
] | 22
|
2015-11-04T23:11:41.000Z
|
2016-05-19T00:08:54.000Z
|
web2py/parameters_8000.py
|
grant523/website
|
1b5351e638a15a1c0c68e55f9162d1b8e62974bb
|
[
"MIT"
] | 4
|
2015-10-29T01:46:44.000Z
|
2018-04-09T22:05:28.000Z
|
password="pbkdf2(1000,20,sha512)$b5e8c23b5f1c9ac6$cb4563c5e0260d493a465ee1d135ac554457f37a"
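The stored value encodes the scheme as pbkdf2(iterations,key_length,digest)$salt$hex_hash. Assuming the standard PBKDF2-HMAC reading of that format (a sketch for illustration, not web2py's own verification code), a candidate password could be checked like this:

import hashlib
import hmac

def check_pbkdf2(stored, candidate):
    """Verify candidate against a 'pbkdf2(iter,keylen,digest)$salt$hash' string."""
    scheme, salt, digest = stored.split("$")
    iterations, key_len, algo = scheme[len("pbkdf2("):-1].split(",")
    derived = hashlib.pbkdf2_hmac(
        algo, candidate.encode("utf8"), salt.encode("utf8"),
        int(iterations), dklen=int(key_len),
    ).hex()
    return hmac.compare_digest(derived, digest)

# e.g. check_pbkdf2(password, "some-guess") -> False unless the guess matches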
| 46
| 91
| 0.891304
|
1fcdf014094af7ce40dd66580466ddb36f4de759
| 3,722
|
py
|
Python
|
rl/core/oracles/oracles.py
|
gtrll/librl
|
39709c3e485e232865b3e08b7211cd9d871c666a
|
[
"MIT"
] | 5
|
2020-07-14T23:01:53.000Z
|
2020-12-09T08:11:29.000Z
|
rl/core/oracles/oracles.py
|
chinganc/mamba
|
e8adf0cf91660aed2c025508137a14f9d062248c
|
[
"MIT"
] | 1
|
2022-03-27T04:43:31.000Z
|
2022-03-27T04:43:31.000Z
|
rl/core/oracles/oracles.py
|
chinganc/mamba
|
e8adf0cf91660aed2c025508137a14f9d062248c
|
[
"MIT"
] | 4
|
2020-08-05T14:13:26.000Z
|
2022-02-26T00:46:03.000Z
|
# Copyright (c) 2019 Georgia Tech Robot Learning Lab
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from rl.core.function_approximators.normalizers import NormalizerStd, Normalizer
from rl.core.oracles import Oracle
class LikelihoodRatioOracle(Oracle):
"""
An Oracle based on the loss function below: if use_log_loss is True
E_{x} E_{y ~ q | x} [ w * log p(y|x) * f(x, y) ]
otherwise, it uses
E_{x} E_{y ~ q | x} [ p(y|x)/q(y|x) * f(x, y) ]
where p is the variable distribution, q is a constant
distribution, and f is a scalar function.
    When w = p/q, the gradients of the two loss functions are equivalent.
The expectation is approximated by unbiased samples from q. To minimize
the variance of sampled gradients, the implementation of 'grad' is
based on a normalizer, which can shift, rescale, or clip f.
"""
def __init__(self, logp_fun, logp_grad,
nor=None, biased=False,
use_log_loss=False, normalized_is=False):
"""
logp_fun: variable -> logp
        logp_grad: variable, f -> E[ f * grad(logp) ]
"""
self._logp_fun = logp_fun
self._logp_grad = logp_grad # sum
self._biased = biased
self._use_log_loss = use_log_loss
self._normalized_is = normalized_is # normalized importance sampling
if nor is None:
if biased: # use the current samples
self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=0.0)
else: # use a moving average
self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=None)
else:
assert isinstance(nor, Normalizer)
self._nor = nor
def fun(self, x):
f = self._f
w_or_logq = self._w_or_logq
logp = self._logp_fun(x)
if self._use_log_loss: # w_or_logq is w
w = w_or_logq
            loss = np.sum(w * f * logp)
else: # w_or_logq is logq
w = np.exp(logp - w_or_logq)
loss = np.sum(w*f)
if self._normalized_is: # normalized importance sampling
return loss / np.sum(w)
else: # regular importance sampling
return loss / f.shape[0]
def grad(self, x):
f = self._f
w_or_logq = self._w_or_logq
if self._use_log_loss: # w_or_logq is w
w = w_or_logq
else: # w_or_logq is logq
logp = self._logp_fun(x)
w = np.exp(logp - w_or_logq)
wf = w*f
print('w', w.min(), w.max(), w.mean())
print('wf', wf.min(), wf.max(), wf.mean())
grad = self._logp_grad(x, wf) # sum
if self._normalized_is: # normalized importance sampling
return grad / np.sum(w)
else: # regular importance sampling
return grad / f.shape[0]
def update(self, f, w_or_logq, update_nor=True):
""" Update the function with Monte-Carlo samples.
f: sampled function values
w_or_logq: importance weight or the log probability of the sampling distribution
update_nor: whether to update the normalizer using the current sample
"""
if self._biased:
self._nor.update(f)
f_normalized = self._nor.normalize(f) # cv
if self._use_log_loss: # w_or_logq is w
assert np.all(w_or_logq >= 0)
# these are treated as constants
            assert f_normalized.shape == w_or_logq.shape
self._f = f_normalized
self._w_or_logq = w_or_logq
if not self._biased and update_nor:
self._nor.update(f)
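The estimator the class docstring describes, E_x E_{y ~ q}[ (p/q) * f ], is plain importance sampling, and the normalized_is flag corresponds to dividing by the summed weights instead of the sample count. A small self-contained numpy illustration (independent of the class above; the Gaussian p and q are made up):

import numpy as np

rng = np.random.default_rng(0)

def gaussian_logpdf(y, mean, std):
    return -0.5 * ((y - mean) / std) ** 2 - np.log(std * np.sqrt(2 * np.pi))

# Samples come from q = N(0, 1); we estimate E_p[y^2] for p = N(0.5, 1).
y = rng.normal(0.0, 1.0, size=200_000)
f = y ** 2
w = np.exp(gaussian_logpdf(y, 0.5, 1.0) - gaussian_logpdf(y, 0.0, 1.0))

plain_is = np.sum(w * f) / y.size          # divide by the sample count
normalized_is = np.sum(w * f) / np.sum(w)  # self-normalized variant

# Both estimates should be close to the true value 0.5**2 + 1 = 1.25.
print(plain_is, normalized_is)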
| 36.851485
| 92
| 0.60129
|
526c8dec06be0ef4de222fa539f2e1e1f27c9280
| 2,213
|
py
|
Python
|
external_data_clean.py
|
temilaj/computer-vision-network
|
c1fa83690172226d663d8e1be61f2282792db7c2
|
[
"MIT"
] | 1
|
2020-11-12T12:30:13.000Z
|
2020-11-12T12:30:13.000Z
|
external_data_clean.py
|
temilaj/computer-vision-network
|
c1fa83690172226d663d8e1be61f2282792db7c2
|
[
"MIT"
] | null | null | null |
external_data_clean.py
|
temilaj/computer-vision-network
|
c1fa83690172226d663d8e1be61f2282792db7c2
|
[
"MIT"
] | 1
|
2020-11-12T12:28:59.000Z
|
2020-11-12T12:28:59.000Z
|
import os
import pandas as pd
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
def find_img_index(iname, emotion_list):
return emotion_list.index(iname)
PATH_to_DIR = os.getcwd()
df = pd.read_csv(PATH_to_DIR + '/data/legend.csv')
# del df['user.id']
#
# emotions = dict.fromkeys(df['emotion'])
#
# print('* unique emotions (unprocessed) :')
# for k in emotions.keys():
# print(f"\t* {k}")
#
# df['emotion'] = df['emotion'].apply(lambda x: str(x).lower())
#
# emotions = dict.fromkeys(df['emotion'])
# print('* unique emotions (processed) :')
# for k in emotions.keys():
# print(f"\t* {k}")
#
# for idx, em in enumerate(df['emotion']):
# if em in ['anger', 'disgust', 'fear', 'sadness', 'contempt']:
# df.iloc[idx, 1] = 'negative'
# elif em in ['surprise', 'happiness']:
# df.iloc[idx, 1] = 'positive'
#
# emotions = dict.fromkeys(df['emotion'])
# print('* unique emotions (after mapping) :')
# for k in emotions.keys():
# print(f"\t* {k}")
#
# print(df.head(10))
#
# df.to_csv(PATH_to_DIR + '/data/legend.csv')
del df['Unnamed: 0']
print(df.head())
list_of_image_names = os.listdir(PATH_to_DIR + '/data/images/')
emotions = df['emotion']
print(f"emotion data size: {len(emotions)}\nimage data size: {len(list_of_image_names)}")
print(f"Length of intersection: {len(intersection(list_of_image_names, df['image']))}")
# files_to_delete = []
# intersect = intersection(list_of_image_names, df['image'])
#
# for f in list_of_image_names:
# if f not in intersect:
# files_to_delete.append(f)
#
# print(f"First 10 files to delete: {files_to_delete[0:10]}")
#
# for iname in files_to_delete:
# os.remove(PATH_to_DIR + '/data/images/' + iname)
#
# print('** deleted un-labeled files **')
for img_name in list_of_image_names:
idx = find_img_index(img_name, list(df['image']))
emotion_label = df.iloc[idx, 1]
raw_name = img_name.split('.jpg')[0]
new_name = raw_name + '_' + emotion_label + '.jpg'
os.rename(PATH_to_DIR + '/data/images/' + img_name,
PATH_to_DIR + '/data/images/' + new_name)
print('** double check with first few names **')
print(os.listdir(PATH_to_DIR + '/data/images/')[0:10])
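The renaming convention applied in the loop above, shown for a hypothetical filename and label (both invented here), is simply:

img_name, emotion_label = "example_face.jpg", "positive"
raw_name = img_name.split('.jpg')[0]
new_name = raw_name + '_' + emotion_label + '.jpg'
assert new_name == "example_face_positive.jpg"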
| 26.035294
| 89
| 0.645278
|
d604fa35feebd58c5fdb31bfcdc77405693f7deb
| 3,869
|
py
|
Python
|
tests/testapp/tests/test_views.py
|
fossabot/django-renderpdf
|
13ea0d0eb6e14e31a5973c1ee52adcc769e52482
|
[
"ISC"
] | null | null | null |
tests/testapp/tests/test_views.py
|
fossabot/django-renderpdf
|
13ea0d0eb6e14e31a5973c1ee52adcc769e52482
|
[
"ISC"
] | null | null | null |
tests/testapp/tests/test_views.py
|
fossabot/django-renderpdf
|
13ea0d0eb6e14e31a5973c1ee52adcc769e52482
|
[
"ISC"
] | null | null | null |
from unittest.mock import call, patch
from django.test import RequestFactory, TestCase
from testapp import views
factory = RequestFactory()
class PromptDownloadTestCase(TestCase):
def test_prompt_download(self):
request = factory.get('/some_view')
response = views.PromptDownloadView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertIn(
b'Content-Type: application/pdf',
response.serialize_headers().splitlines()
)
self.assertIn(
b'Content-Disposition: attachment; filename="myfile.pdf"',
response.serialize_headers().splitlines(),
)
# Assert that response looks like a PDF
self.assertTrue(response.content.startswith(b'%PDF-1.'))
def test_dont_prompt_download(self):
request = factory.get('/some_view')
response = views.NoPromptDownloadView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertIn(
b'Content-Type: application/pdf',
response.serialize_headers().splitlines()
)
self.assertNotIn(b'Content-Disposition:', response.serialize_headers())
# Assert that response looks like a PDF
self.assertTrue(response.content.startswith(b'%PDF-1.'))
class ForceHTMLTestCase(TestCase):
def test_force_html_allowed(self):
request = factory.get('/some_view?html=true')
response = views.AllowForceHtmlView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(b'Hi!\n', response.content)
self.assertIn(
b'Content-Type: text/html; charset=utf-8',
response.serialize_headers().splitlines()
)
def test_no_force_html_allowed(self):
request = factory.get('/some_view')
response = views.AllowForceHtmlView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertIn(
b'Content-Type: application/pdf',
response.serialize_headers().splitlines()
)
# Assert that response looks like a PDF
self.assertTrue(response.content.startswith(b'%PDF-1.'))
def test_force_html_disallowed(self):
request = factory.get('/some_view?html=true')
response = views.DisallowForceHtmlView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertIn(
b'Content-Type: application/pdf',
response.serialize_headers().splitlines()
)
# Assert that response looks like a PDF
self.assertTrue(response.content.startswith(b'%PDF-1.'))
def test_no_force_html_disallowed(self):
request = factory.get('/some_view')
response = views.DisallowForceHtmlView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertIn(
b'Content-Type: application/pdf',
response.serialize_headers().splitlines()
)
# Assert that response looks like a PDF
self.assertTrue(response.content.startswith(b'%PDF-1.'))
class CustomUrlFetcherTestCase(TestCase):
pass # TODO
class StaticFileResolutionTestCase(TestCase):
def test_url_fetcher_used(self):
request = factory.get('/some_view')
with patch(
'django_renderpdf.helpers.staticfiles_url_fetcher',
return_value={
'string': 'html { margin: 0; }',
'mime_type': 'text/css',
},
spec=True,
) as fetcher:
response = views.TemplateWithStaticFileView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(fetcher.call_count, 1)
self.assertEqual(
fetcher.call_args,
call('/static/path/not/relevant.css'),
)
| 33.938596
| 79
| 0.642802
|
dfb26c4e18c8120ddbf26648330c30f2b7a97eae
| 2,175
|
py
|
Python
|
asreview/entry_points/algorithms.py
|
BartJanBoverhof/asreview
|
33894ebebf7bda2c552b707c3725c0287ac00369
|
[
"Apache-2.0"
] | null | null | null |
asreview/entry_points/algorithms.py
|
BartJanBoverhof/asreview
|
33894ebebf7bda2c552b707c3725c0287ac00369
|
[
"Apache-2.0"
] | null | null | null |
asreview/entry_points/algorithms.py
|
BartJanBoverhof/asreview
|
33894ebebf7bda2c552b707c3725c0287ac00369
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asreview.entry_points.base import BaseEntryPoint
from asreview.models.balance import list_balance_strategies
from asreview.models import list_classifiers
from asreview.models.feature_extraction import list_feature_extraction
from asreview.models.query import list_query_strategies
def _format_algorithm(values, name, description):
s = f" {name: <20}Available {description}:\n\n"
result = []
for x in values:
result.append(" " * 22 + f"{x}")
s += "\n".join(result)
s += "\n\n"
return s
class AlgorithmsEntryPoint(BaseEntryPoint):
description = "Available active learning algorithms for ASReview."
def execute(self, argv):
s = "Available active learning algorithms for ASReview. \n\n"
# classifiers
s += _format_algorithm(
values=list_classifiers(),
name="classifiers",
description="classification algorithms"
)
# query_strategies
s += _format_algorithm(
values=list_query_strategies(),
name="query_strategies",
description="query strategies"
)
# balance_strategies
s += _format_algorithm(
values=list_balance_strategies(),
name="balance_strategies",
description="balance strategies"
)
# feature_extraction
s += _format_algorithm(
values=list_feature_extraction(),
name="feature_extraction",
description="feature extraction algorithms"
)
print(s)
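As a quick illustration of the column layout _format_algorithm produces (the two strategy names are invented placeholders, not ASReview's actual registry):

print(_format_algorithm(
    values=["nb", "svm"],
    name="classifiers",
    description="classification algorithms",
))
# Header line: two leading spaces, the name left-aligned in a 20-character
# field, then "Available classification algorithms:"; each value follows on
# its own line indented by 22 spaces, with a trailing blank line.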
| 29.794521
| 74
| 0.673563
|
44d7bbdebddeee4f015b6e2360c1152711ada554
| 9,753
|
py
|
Python
|
venv/Lib/site-packages/olefile/README.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
venv/Lib/site-packages/olefile/README.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
venv/Lib/site-packages/olefile/README.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
| 145.567164
| 772
| 0.887829
|