Dataset schema (column, type, observed values):

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

Columns marked ⌀ also contain null values.

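The trailing statistics columns (size, avg_line_length, max_line_length, alphanum_fraction) look like simple per-file statistics derived from content. A minimal sketch of how such values could be recomputed is shown below; the exact definitions are not documented on this page, so treating size as a character count and the straightforward line/character handling here are assumptions.

```python
# Sketch (assumption): recompute the derived per-file statistics from `content`.
# The dataset does not document these columns here, so this mirrors the most
# obvious interpretation of avg_line_length, max_line_length and alphanum_fraction.

def file_stats(content: str) -> dict:
    lines = content.splitlines() or [""]          # avoid division by zero on empty files
    line_lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "size": len(content),                     # character count (bytes vs. chars is an assumption)
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }

# Example on a tiny two-line snippet:
print(file_stats("from setuptools import setup\nsetup(name='demo')\n"))
```

Recomputed values may differ slightly from the recorded ones depending on how trailing newlines and non-ASCII characters are counted.
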
hexsha: 7c17e52c3fe60426ceb8799d558d1a3c553bee41 | size: 1,331 | ext: py | lang: Python
max_stars: setup.py @ EfficientEra/login-and-pay-with-amazon-sdk-python (head 029175abc9835ba1927cdd04e88209212cee2443, licenses ["Apache-2.0"]) | count: 1 | events: 2019-12-01T09:14:26.000Z – 2019-12-01T09:14:26.000Z
max_issues: setup.py @ EfficientEra/login-and-pay-with-amazon-sdk-python (head 029175abc9835ba1927cdd04e88209212cee2443, licenses ["Apache-2.0"]) | count: null | events: null
max_forks: setup.py @ EfficientEra/login-and-pay-with-amazon-sdk-python (head 029175abc9835ba1927cdd04e88209212cee2443, licenses ["Apache-2.0"]) | count: null | events: null
content:
from setuptools import setup
import pay_with_amazon.version as pwa_version
setup(
name='pay_with_amazon',
packages=['pay_with_amazon'],
version=pwa_version.versions['application_version'],
description='Login and Pay with Amazon Python SDK',
url='https://github.com/amzn/login-and-pay-with-amazon-sdk-python',
download_url='https://github.com/amzn/login-and-pay-with-amazon-sdk-python/tarball/{0}'.format(
pwa_version.versions['application_version']),
author='EPS-DSE',
author_email='pay-with-amazon-sdk@amazon.com',
license='Apache License version 2.0, January 2004',
install_requires=['pyOpenSSL >= 0.11',
'requests >= 2.6.0',
'mock'],
keywords=['Amazon', 'Payments', 'Login', 'Python', 'API', 'SDK'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules']
)
avg_line_length: 42.935484 | max_line_length: 99 | alphanum_fraction: 0.639369

hexsha: 3bcb2215a3aa7f4a791e6b80516b1afd33a67096 | size: 3,060 | ext: py | lang: Python
max_stars: timesketch/lib/analyzers/utils_test.py @ macdaliot/timesketch (head f6a4984208f4c39f01efd72e36ddf21f630b6699, licenses ["Apache-2.0"]) | count: 4 | events: 2018-11-01T16:13:31.000Z – 2022-03-18T12:09:25.000Z
max_issues: timesketch/lib/analyzers/utils_test.py @ macdaliot/timesketch (head f6a4984208f4c39f01efd72e36ddf21f630b6699, licenses ["Apache-2.0"]) | count: null | events: null
max_forks: timesketch/lib/analyzers/utils_test.py @ macdaliot/timesketch (head f6a4984208f4c39f01efd72e36ddf21f630b6699, licenses ["Apache-2.0"]) | count: 1 | events: 2021-11-16T00:01:18.000Z – 2021-11-16T00:01:18.000Z
content:
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for analysis utils."""
from __future__ import unicode_literals
import six
import pandas as pd
from timesketch.lib.testlib import BaseTest
from timesketch.lib.analyzers import utils
class TestAnalyzerUtils(BaseTest):
"""Tests the functionality of the utilities."""
def test_get_domain_from_url(self):
"""Test get_domain_from_url function."""
url = 'http://www.example.com/?foo=bar'
domain = utils.get_domain_from_url(url)
self.assertEqual(domain, 'www.example.com')
def test_get_tld_from_domain(self):
"""Test get_tld_from_domain function."""
domain = 'this.is.a.subdomain.example.com'
tld = utils.get_tld_from_domain(domain)
self.assertEqual(tld, 'example.com')
domain = 'a'
tld = utils.get_tld_from_domain(domain)
self.assertEqual(tld, 'a')
domain = 'example.com'
tld = utils.get_tld_from_domain(domain)
self.assertEqual(tld, 'example.com')
def test_strip_www_from_domain(self):
"""Test strip_www_from_domain function."""
domain = 'www.mbl.is'
stripped = utils.strip_www_from_domain(domain)
self.assertEqual(stripped, 'mbl.is')
domain = 'mbl.is'
stripped = utils.strip_www_from_domain(domain)
self.assertEqual(stripped, domain)
def test_get_cdn_provider(self):
"""Test get_cdn_provider function."""
domain = 'foobar.gstatic.com'
provider = utils.get_cdn_provider(domain)
self.assertIsInstance(provider, six.text_type)
self.assertEqual(provider, 'Google')
domain = 'www.mbl.is'
provider = utils.get_cdn_provider(domain)
self.assertIsInstance(provider, six.text_type)
self.assertEqual(provider, '')
def test_get_events_from_data_frame(self):
"""Test getting all events from data frame."""
lines = [
{'_id': '123', '_type': 'manual', '_index': 'asdfasdf',
'tool': 'isskeid'},
{'_id': '124', '_type': 'manual', '_index': 'asdfasdf',
'tool': 'tong'},
{'_id': '125', '_type': 'manual', '_index': 'asdfasdf',
'tool': 'klemma'},
]
frame = pd.DataFrame(lines)
events = list(utils.get_events_from_data_frame(frame, None))
self.assertEqual(len(events), 3)
ids = [x.event_id for x in events]
self.assertEqual(set(ids), set(['123', '124', '125']))
avg_line_length: 35.172414 | max_line_length: 74 | alphanum_fraction: 0.652288

hexsha: 215ae35002af3c2426a7d33004ec4f9389563be3 | size: 1,669 | ext: py | lang: Python
max_stars: how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py @ Jaboo9/MachineLearningNotebooks (head 6fe90ec1bfedcd51da4fa9f709583458cbddcf3c, licenses ["MIT"]) | count: null | events: null
max_issues: how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py @ Jaboo9/MachineLearningNotebooks (head 6fe90ec1bfedcd51da4fa9f709583458cbddcf3c, licenses ["MIT"]) | count: null | events: null
max_forks: how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py @ Jaboo9/MachineLearningNotebooks (head 6fe90ec1bfedcd51da4fa9f709583458cbddcf3c, licenses ["MIT"]) | count: 1 | events: 2021-06-02T06:31:15.000Z – 2021-06-02T06:31:15.000Z
content:
import argparse
import azureml.train.automl
from azureml.automl.runtime._vendor.automl.client.core.runtime import forecasting_models
from azureml.core import Run
from sklearn.externals import joblib
import forecasting_helper
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_horizon', type=int, dest='max_horizon',
default=10, help='Max Horizon for forecasting')
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
parser.add_argument(
'--time_column_name', type=str, dest='time_column_name',
help='Time Column Name')
parser.add_argument(
'--frequency', type=str, dest='freq',
help='Frequency of prediction')
args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
fitted_model = joblib.load('model.pkl')
df_all = forecasting_helper.do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
target_column_name,
time_column_name,
max_horizon,
freq)
file_name = 'outputs/predictions.csv'
export_csv = df_all.to_csv(file_name, header=True)
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
avg_line_length: 29.280702 | max_line_length: 88 | alphanum_fraction: 0.777711

hexsha: e2288dcd49d8a7a8cb34bbd4283e96f8939d59f5 | size: 5,682 | ext: py | lang: Python
max_stars: oss2/exceptions.py @ rxwen/aliyun-oss-py (head 090fa82414490cded6c7af12802239f6fdd5d268, licenses ["Apache-2.0"]) | count: null | events: null
max_issues: oss2/exceptions.py @ rxwen/aliyun-oss-py (head 090fa82414490cded6c7af12802239f6fdd5d268, licenses ["Apache-2.0"]) | count: null | events: null
max_forks: oss2/exceptions.py @ rxwen/aliyun-oss-py (head 090fa82414490cded6c7af12802239f6fdd5d268, licenses ["Apache-2.0"]) | count: null | events: null
content:
# -*- coding: utf-8 -*-
"""
oss2.exceptions
~~~~~~~~~~~~~~
Exception classes.
"""
import re
import xml.etree.ElementTree as ElementTree
from xml.parsers import expat
from .compat import to_string
_OSS_ERROR_TO_EXCEPTION = {} # populated at end of module
OSS_CLIENT_ERROR_STATUS = -1
OSS_REQUEST_ERROR_STATUS = -2
OSS_INCONSISTENT_ERROR_STATUS = -3
class OssError(Exception):
def __init__(self, status, headers, body, details):
        #: HTTP status code
        self.status = status
        #: Request ID, used to track an OSS request; when filing a support ticket it is best to provide this request ID
        self.request_id = headers.get('x-oss-request-id', '')
        #: HTTP response body (partial)
        self.body = body
        #: Detailed error information, a string-to-string dict
        self.details = details
        #: OSS error code
        self.code = self.details.get('Code', '')
        #: OSS error message
        self.message = self.details.get('Message', '')
def __str__(self):
error = {'status': self.status,
'details': self.details}
return str(error)
class ClientError(OssError):
def __init__(self, message):
OssError.__init__(self, OSS_CLIENT_ERROR_STATUS, {}, 'ClientError: ' + message, {})
def __str__(self):
error = {'status': self.status,
'details': self.body}
return str(error)
class RequestError(OssError):
def __init__(self, e):
OssError.__init__(self, OSS_REQUEST_ERROR_STATUS, {}, 'RequestError: ' + str(e), {})
self.exception = e
def __str__(self):
error = {'status': self.status,
'details': self.body}
return str(error)
class InconsistentError(OssError):
def __init__(self, message):
OssError.__init__(self, OSS_INCONSISTENT_ERROR_STATUS, {}, 'InconsistentError: ' + message, {})
def __str__(self):
error = {'status': self.status,
'details': self.body}
return str(error)
class ServerError(OssError):
pass
class NotFound(ServerError):
status = 404
code = ''
class MalformedXml(ServerError):
status = 400
code = 'MalformedXML'
class InvalidArgument(ServerError):
status = 400
code = 'InvalidArgument'
def __init__(self, status, headers, body, details):
super(InvalidArgument, self).__init__(status, headers, body, details)
self.name = details.get('ArgumentName')
self.value = details.get('ArgumentValue')
class InvalidObjectName(ServerError):
status = 400
code = 'InvalidObjectName'
class NoSuchBucket(NotFound):
status = 404
code = 'NoSuchBucket'
class NoSuchKey(NotFound):
status = 404
code = 'NoSuchKey'
class NoSuchUpload(NotFound):
status = 404
code = 'NoSuchUpload'
class NoSuchWebsite(NotFound):
status = 404
code = 'NoSuchWebsiteConfiguration'
class NoSuchLifecycle(NotFound):
status = 404
code = 'NoSuchLifecycle'
class NoSuchCors(NotFound):
status = 404
code = 'NoSuchCORSConfiguration'
class NoSuchLiveChannel(NotFound):
status = 404
code = 'NoSuchLiveChannel'
class Conflict(ServerError):
status = 409
code = ''
class BucketNotEmpty(Conflict):
status = 409
code = 'BucketNotEmpty'
class PositionNotEqualToLength(Conflict):
status = 409
code = 'PositionNotEqualToLength'
def __init__(self, status, headers, body, details):
super(PositionNotEqualToLength, self).__init__(status, headers, body, details)
self.next_position = int(headers['x-oss-next-append-position'])
class ObjectNotAppendable(Conflict):
status = 409
code = 'ObjectNotAppendable'
class ChannelStillLive(Conflict):
status = 409
code = 'ChannelStillLive'
class LiveChannelDisabled(Conflict):
status = 409
code = 'LiveChannelDisabled'
class PreconditionFailed(ServerError):
status = 412
code = 'PreconditionFailed'
class NotModified(ServerError):
status = 304
code = ''
class AccessDenied(ServerError):
status = 403
code = 'AccessDenied'
def make_exception(resp):
status = resp.status
headers = resp.headers
body = resp.read(4096)
details = _parse_error_body(body)
code = details.get('Code', '')
try:
klass = _OSS_ERROR_TO_EXCEPTION[(status, code)]
return klass(status, headers, body, details)
except KeyError:
return ServerError(status, headers, body, details)
def _walk_subclasses(klass):
for sub in klass.__subclasses__():
yield sub
for subsub in _walk_subclasses(sub):
yield subsub
for klass in _walk_subclasses(ServerError):
status = getattr(klass, 'status', None)
code = getattr(klass, 'code', None)
if status is not None and code is not None:
_OSS_ERROR_TO_EXCEPTION[(status, code)] = klass
# XML parsing exceptions have changed in Python2.7 and ElementTree 1.3
if hasattr(ElementTree, 'ParseError'):
ElementTreeParseError = (ElementTree.ParseError, expat.ExpatError)
else:
ElementTreeParseError = (expat.ExpatError)
def _parse_error_body(body):
try:
root = ElementTree.fromstring(body)
if root.tag != 'Error':
return {}
details = {}
for child in root:
details[child.tag] = child.text
return details
except ElementTreeParseError:
return _guess_error_details(body)
def _guess_error_details(body):
details = {}
body = to_string(body)
if '<Error>' not in body or '</Error>' not in body:
return details
m = re.search('<Code>(.*)</Code>', body)
if m:
details['Code'] = m.group(1)
m = re.search('<Message>(.*)</Message>', body)
if m:
details['Message'] = m.group(1)
return details
avg_line_length: 21.687023 | max_line_length: 103 | alphanum_fraction: 0.645723

hexsha: 989a3e62d2d140a347ede66c54d2a50ae51dac9f | size: 516 | ext: py | lang: Python
max_stars: backend/api/authentication/__init__.py @ jacorea/ismp (head 81cf55559005753f3055165689889b18aec958ac, licenses ["CC0-1.0"]) | count: 3 | events: 2020-05-08T03:51:43.000Z – 2020-06-13T23:12:26.000Z
max_issues: backend/api/authentication/__init__.py @ jacorea/ismp (head 81cf55559005753f3055165689889b18aec958ac, licenses ["CC0-1.0"]) | count: 15 | events: 2020-05-04T05:49:17.000Z – 2020-06-01T21:31:03.000Z
max_forks: backend/api/authentication/__init__.py @ jacorea/ismp (head 81cf55559005753f3055165689889b18aec958ac, licenses ["CC0-1.0"]) | count: 11 | events: 2020-05-01T04:35:24.000Z – 2020-05-28T17:17:21.000Z
content:
from django.apps import AppConfig
class AuthenticationAppConfig(AppConfig):
name = 'api.authentication'
label = 'authentication'
verbose_name = 'Authentication'
def ready(self):
import api.authentication.signals
# This is how we register our custom app config with Django. Django is smart
# enough to look for the `default_app_config` property of each registered app
# and use the correct app config based on that value.
default_app_config = 'api.authentication.AuthenticationAppConfig'
avg_line_length: 30.352941 | max_line_length: 77 | alphanum_fraction: 0.767442

hexsha: 17f5f3d022cebd9ec1879e82a515bf6d9ebdad4b | size: 16,853 | ext: py | lang: Python
max_stars: sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point10/script_DoE10_meshing.py @ hanklu2020/mabessa_F3DAS (head 57b1bd1cb85d96567ad1044c216535ab3df88db3, licenses ["BSD-3-Clause"]) | count: null | events: null
max_issues: sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point10/script_DoE10_meshing.py @ hanklu2020/mabessa_F3DAS (head 57b1bd1cb85d96567ad1044c216535ab3df88db3, licenses ["BSD-3-Clause"]) | count: null | events: null
max_forks: sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point10/script_DoE10_meshing.py @ hanklu2020/mabessa_F3DAS (head 57b1bd1cb85d96567ad1044c216535ab3df88db3, licenses ["BSD-3-Clause"]) | count: null | events: null
content:
# Abaqus/CAE script
# Created by M.A. Bessa (M.A.Bessa@tudelft.nl) on 12-Nov-2019 00:39:42
#
from abaqus import *
from abaqusConstants import *
session.viewports['Viewport: 1'].makeCurrent()
#session.viewports['Viewport: 1'].maximize()
from caeModules import *
from driverUtils import executeOnCaeStartup
executeOnCaeStartup()
Mdb()
#
import numpy
#------------------------------------------------------------
os.chdir(r'/home/gkus/F3DAS-master/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point10')
#
#-------------------------------------------------------------
# Parameters:
VertexPolygon = 3 # Number of vertices (sides) of the polygon base
power = 1.00000e+00 # Power law exponent establishing the evolution of the spacing between battens
MastDiameter = 1.00000e+02 # Radius of the circumscribing circle of the polygon
nStories = 1 # Number of stories in HALF of the strut (i.e. in a single AstroMast!)
MastPitch = 2.83874e+01 # Pitch length of the strut (i.e. a single AstroMast!)
pinned_joints = 1 # (1 = batten are pinned to longerons, 0 = battens and longerons are a solid piece)
Longeron_CS = 1.00008e+01 # (Cross Section of the longeron)
Ix = 5.40172e+01 # (Second moment of area around X axis )
Iy = 7.50000e+01 # (Second moment of area around Y axis )
J = 2.50000e+02 # (Second moment of area around X axis )
Emodulus = 1.82600e+03 # (Youngus Modulus)
Gmodulus = 6.57369e+02 # (Shear Modulus)
nu = 3.88869e-01 # (Poisson Ratio)
ConeSlope = 5.00000e-01 # Slope of the longerons (0 = straight, <0 larger at the top, >0 larger at the bottom)
Twist_angle = 0.00000e+00 # Do you want to twist the longerons?
transition_length_ratio = 1.00000e+00 # Transition zone for the longerons
#------------------------------------------------------------
MastRadius = MastDiameter/2.0
MastHeight = nStories*MastPitch
Mesh_size = min(MastRadius,MastPitch)/300.0
session.viewports['Viewport: 1'].setValues(displayedObject=None)
# Create all the joints of the a single Deployable Mast:
joints = numpy.zeros((nStories+1,VertexPolygon,3))
joints_outter = numpy.zeros((nStories+1,VertexPolygon,3))
for iStorey in range(0,nStories+1,1):
for iVertex in range(0,VertexPolygon,1):
# Constant spacing between each storey (linear evolution):
Zcoord = MastHeight/nStories*iStorey
# Power-law spacing between each storey (more frequent at the fixed end):
# Zcoord = MastHeight*(float(iStorey)/float(nStories))**power
# Power-law spacing between each storey (more frequent at the rotating end):
# Zcoord = -MastHeight/(float(nStories)**power)*(float(nStories-iStorey)**power)+MastHeight
# Exponential spacing between each storey
# Zcoord =(MastHeight+1.0)/exp(float(nStories))*exp(float(iStorey))
#
Xcoord = MastRadius*cos(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))
Ycoord = MastRadius*sin(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))
# Save point defining this joint:
joints[iStorey,iVertex,:] = (Xcoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Ycoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Zcoord)
#
center = (0.0,0.0)
vec = joints[iStorey,iVertex,0:2]-center
norm_vec = numpy.linalg.norm(vec)
joints_outter[iStorey,iVertex,2] = joints[iStorey,iVertex,2]
joints_outter[iStorey,iVertex,0:2] = joints[iStorey,iVertex,0:2]
# end iSide loop
#end iStorey loop
# Create the longerons:
p_longerons = mdb.models['Model-1'].Part(name='longerons', dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p_longerons = mdb.models['Model-1'].parts['longerons']
session.viewports['Viewport: 1'].setValues(displayedObject=p_longerons)
d_longerons, r_longerons = p_longerons.datums, p_longerons.referencePoints
LocalDatum_list = [] # List with local coordinate system for each longeron
long_midpoints = [] # List with midpoints of longerons (just to determine a set containing the longerons)
e_long = p_longerons.edges
for iVertex in range(0,VertexPolygon,1):
# First create local coordinate system (useful for future constraints, etc.):
iStorey=0
origin = joints[iStorey,iVertex,:]
point2 = joints[iStorey,iVertex-1,:]
name = 'Local_Datum_'+str(iVertex)
LocalDatum_list.append(p_longerons.DatumCsysByThreePoints(origin=origin, point2=point2, name=name,
coordSysType=CARTESIAN, point1=(0.0, 0.0, 0.0)))
#
# Then, create the longerons
templist = [] # List that will contain the points used to make each longeron
for iStorey in range(0,nStories+1,1):
templist.append(joints[iStorey,iVertex,:])
if iStorey != 0: # Save midpoints of bars
long_midpoints.append( [(joints[iStorey-1,iVertex,:]+joints[iStorey,iVertex,:])/2 , ])
# end if
# end iStorey loop
p_longerons.WirePolyLine(points=templist,
mergeType=IMPRINT, meshable=ON)
# Create set for each longeron (to assign local beam directions)
for i in range(0,len(templist)): # loop over longerons edges
if i == 0:
select_edges = e_long.findAt([templist[0], ]) # Find the first edge
else:
# Now find remaining edges in longerons
temp = e_long.findAt([templist[i], ])
select_edges = select_edges + temp
#end if
#end i loop
longeron_name = 'longeron-'+str(iVertex)+'_set'
p_longerons.Set(edges=select_edges, name=longeron_name)
#end for iVertex loop
# Longerons set:
e_long = p_longerons.edges
select_edges = []
for i in range(0,len(long_midpoints)): # loop over longerons edges
if i == 0:
select_edges = e_long.findAt(long_midpoints[0]) # Find the first edge
else:
# Now find remaining edges in longerons
temp = e_long.findAt(long_midpoints[i])
select_edges = select_edges + temp
#end if
#end i loop
p_longerons.Set(edges=select_edges, name='all_longerons_set')
all_longerons_set_edges = select_edges
p_longerons.Surface(circumEdges=all_longerons_set_edges, name='all_longerons_surface')
# Create a set with all the joints:
v_long = p_longerons.vertices
select_vertices = []
select_top_vertices = []
select_bot_vertices = []
for iStorey in range(0,nStories+1,1):
for iVertex in range(0,VertexPolygon,1):
# Select all the joints in the longerons:
current_joint = v_long.findAt( [joints[iStorey,iVertex,:] , ] ) # Find the first vertex
current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)
# Create a set for each joint:
p_longerons.Set(vertices=current_joint, name=current_joint_name)
#
if iStorey == 0 and iVertex == 0:
select_vertices = current_joint # Instantiate the first point in set
else:
select_vertices = select_vertices + current_joint # Instantiate the first point in set
# endif iStorey == 0 and iVertex == 0
#
if iStorey == 0: # Also save the bottom nodes separately
if iVertex == 0:
# Start selecting the bottom joints for implementing the boundary conditions
select_bot_vertices = current_joint
else:
select_bot_vertices = select_bot_vertices + current_joint
# endif iStorey == 0:
elif iStorey == nStories: # Also save the top nodes separately
if iVertex == 0:
# Start selecting the top joints for implementing the boundary conditions
select_top_vertices = current_joint
else: # remaining vertices:
select_top_vertices = select_top_vertices + current_joint
#end if
#end iVertex loop
#end iStorey loop
p_longerons.Set(vertices=select_vertices, name='all_joints_set')
p_longerons.Set(vertices=select_bot_vertices, name='bot_joints_set')
p_longerons.Set(vertices=select_top_vertices, name='top_joints_set')
#
# Create materials:
mdb.models['Model-1'].Material(name='NiTi_alloy')
mdb.models['Model-1'].materials['NiTi_alloy'].Elastic(table=((83.0E3, 0.31),
))
mdb.models['Model-1'].materials['NiTi_alloy'].Density(table=((1.0E-3, ), ))
mdb.models['Model-1'].Material(name='PC')
mdb.models['Model-1'].materials['PC'].Elastic(table=((2134, 0.27),
))
mdb.models['Model-1'].materials['PC'].Density(table=((1.19E-3, ), ))
mdb.models['Model-1'].Material(name='PLA')
mdb.models['Model-1'].materials['PLA'].Elastic(table=((Emodulus, nu),
))
mdb.models['Model-1'].materials['PLA'].Density(table=((1.24E-3, ), ))
mdb.models['Model-1'].Material(name='CNT')
mdb.models['Model-1'].materials['CNT'].Elastic(table=((1000.0E3, 0.3),
))
mdb.models['Model-1'].materials['CNT'].Density(table=((1.0E-3, ), ))
# Create beam profiles and beam sections:
mdb.models['Model-1'].GeneralizedProfile(name='LongeronsProfile', area=Longeron_CS, i11=Ix, i12=0.0, i22=Iy, j=J, gammaO=0.0, gammaW=0.0)
mdb.models['Model-1'].BeamSection(name='LongeronsSection', integration=
BEFORE_ANALYSIS, poissonRatio=0.31, beamShape=CONSTANT,
profile='LongeronsProfile', density=0.00124, thermalExpansion=OFF,
temperatureDependency=OFF, dependencies=0, table=((Emodulus, Gmodulus), ),
alphaDamping=0.0, betaDamping=0.0, compositeDamping=0.0, centroid=(0.0,
0.0), shearCenter=(0.0, 0.0), consistentMassMatrix=False)
# Assign respective sections:
p_longerons.SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=
p_longerons.sets['all_longerons_set'],
sectionName='LongeronsSection', thicknessAssignment=FROM_SECTION)
# Assing beam orientation:
for iVertex in range(0,VertexPolygon,1):
iStorey=0
dir_vec_n1 = joints[iStorey,iVertex,:]-(0.,0.,0.) # Vector n1 perpendicular to the longeron tangent
longeron_name = 'longeron-'+str(iVertex)+'_set'
region=p_longerons.sets[longeron_name]
p_longerons.assignBeamSectionOrientation(region=region, method=N1_COSINES, n1=dir_vec_n1)
#end for iVertex
#
delta = Mesh_size/100.0
########################################################################
#Mesh the structure
#refPlane = p_longerons.DatumPlaneByPrincipalPlane(principalPlane=XYPLANE, offset=L/2)
#d = p.datums
#All_faces = facesLeafs+facesDoubleThickBoom
#p.PartitionFaceByDatumPlane(datumPlane=d[refPlane.id], faces=All_faces)
##
#session.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF
# engineeringFeatures=OFF, mesh=ON)
#session.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(
# meshTechnique=ON)
#p = mdb.models['Model-1'].parts['reducedCF_TRAC_boom']
p_longerons.seedPart(size=Mesh_size, deviationFactor=0.04, minSizeFactor=0.001,
constraint=FINER)
p_longerons.seedEdgeBySize(edges=all_longerons_set_edges, size=Mesh_size, deviationFactor=0.04,
constraint=FINER)
elemType_longerons = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD) # Element type
p_longerons.setElementType(regions=(all_longerons_set_edges, ), elemTypes=(elemType_longerons, ))
p_longerons.generateMesh()
#######################################################################
# Make Analytical surfaces for contact purposes
s1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__',
sheetSize=MastRadius*3.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.setPrimaryObject(option=STANDALONE)
s1.Line(point1=(0.0, -MastRadius*1.1), point2=(0.0, MastRadius*1.1))
s1.VerticalConstraint(entity=g[2], addUndoState=False)
p_surf = mdb.models['Model-1'].Part(name='AnalyticSurf', dimensionality=THREE_D,
type=ANALYTIC_RIGID_SURFACE)
p_surf = mdb.models['Model-1'].parts['AnalyticSurf']
p_surf.AnalyticRigidSurfExtrude(sketch=s1, depth=MastRadius*2.2)
s1.unsetPrimaryObject()
rigid_face = p_surf.faces
#surf_select = f.findAt((0.0,MastRadius*1.05,0.0))
#surf_select = f[0]
p_surf.Surface(side1Faces=rigid_face, name='rigid_support')
#p_surf.Set(faces=surf_select, name='support_surface_set')
#p_surf.sets['all_diagonals_set']
#
# Make assembly:
a = mdb.models['Model-1'].rootAssembly
a.DatumCsysByDefault(CARTESIAN)
# Create reference points to assign boundary conditions
RP_ZmYmXm = a.ReferencePoint(point=(0.0, 0.0, -1.1*MastRadius))
refpoint_ZmYmXm = (a.referencePoints[RP_ZmYmXm.id],)
a.Set(referencePoints=refpoint_ZmYmXm, name='RP_ZmYmXm')
#
RP_ZpYmXm = a.ReferencePoint(point=(0.0, 0.0, MastHeight+1.1*MastRadius))
refpoint_ZpYmXm = (a.referencePoints[RP_ZpYmXm.id],)
a.Set(referencePoints=refpoint_ZpYmXm, name='RP_ZpYmXm')
#
# Create longerons
a_long = a.Instance(name='longerons-1-1', part=p_longerons, dependent=ON)
# Create bottom surface
a_surf_bot = a.Instance(name='AnalyticSurf-1-1', part=p_surf, dependent=ON)
# Now rotate the plane to have the proper direction
a.rotate(instanceList=('AnalyticSurf-1-1', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(0.0, 1.0, 0.0), angle=90.0)
#
# Create set with surface
select_bot_surf=a_surf_bot.surfaces['rigid_support']
# Perhaps we need to define a set instead of a face
#AnalyticSurf_surface=a_surf_bot.Surface(side1Faces=select_bot_surf, name='support_surf_bot-1')
mdb.models['Model-1'].RigidBody(name='Constraint-RigidBody_surf_bot-1', refPointRegion=refpoint_ZmYmXm,
surfaceRegion=select_bot_surf)
for iVertex in range(0,VertexPolygon,1):
#
# Select appropriate coordinate system:
DatumID = LocalDatum_list[iVertex].id
datum = a_long.datums[DatumID]
for iStorey in range(0,nStories+1,1):
# Current joint:
current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)
# Define COUPLING constraints for all the joints:
if iStorey == 0: # Bottom base:
#
master_region=a.sets['RP_ZmYmXm'] # Note that the master is the Reference Point
#
slave_region=a_long.sets[current_joint_name]
# Make constraint for this joint:
Constraint_name = 'RP_ZmYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)
mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
#
#Constraint_name = 'RP_ZmYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
#mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
# surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
# localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
# Make constraint for this joint:
elif iStorey == nStories: # Top base:
#
master_region=a.sets['RP_ZpYmXm'] # Note that the master is the Reference Point
#
slave_region=a_long.sets[current_joint_name]
# Make constraint for this joint:
Constraint_name = 'RP_ZpYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)
mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
#
#Constraint_name = 'RP_ZpYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
#mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
# surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
# localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
# Make constraint for this joint:
else: # Middle stories:
master_region=a_long.sets[current_joint_name]
#
slave_region=a_bat.sets[current_joint_name]
# Make constraint for this joint:
#endif iStorey
#
#end for iStorey
#end for iVertex
#
# Create hinges:
#select_joints=a.instances['deployable_mast-1'].sets['all_joints_set']
#select_RefPoint=a.sets['RP_joints']
#mdb.models['Model-1'].RigidBody(name='JointsContraint', refPointRegion=select_RefPoint,
# pinRegion=select_joints)
#
# Export mesh to .inp file
#
mdb.Job(name='include_mesh_DoE10', model='Model-1', type=ANALYSIS, explicitPrecision=SINGLE,
nodalOutputPrecision=SINGLE, description='',
parallelizationMethodExplicit=DOMAIN, multiprocessingMode=DEFAULT,
numDomains=1, userSubroutine='', numCpus=1, memory=90,
memoryUnits=PERCENTAGE, scratch='', echoPrint=OFF, modelPrint=OFF,
contactPrint=OFF, historyPrint=OFF)
import os
mdb.jobs['include_mesh_DoE10'].writeInput(consistencyChecking=OFF)
# End of python script
avg_line_length: 44.467018 | max_line_length: 206 | alphanum_fraction: 0.692933

hexsha: 8ef0138c93d6fca21405c2b6172e89b5ed02dada | size: 134 | ext: py | lang: Python
max_stars: pymediaroom/__init__.py @ MartinHjelmare/pymediaroom (head f4f2686c8d5622dd5ae1bcdd76900ba35e148529, licenses ["MIT"]) | count: null | events: null
max_issues: pymediaroom/__init__.py @ MartinHjelmare/pymediaroom (head f4f2686c8d5622dd5ae1bcdd76900ba35e148529, licenses ["MIT"]) | count: null | events: null
max_forks: pymediaroom/__init__.py @ MartinHjelmare/pymediaroom (head f4f2686c8d5622dd5ae1bcdd76900ba35e148529, licenses ["MIT"]) | count: null | events: null
content:
from .remote import *
from .commands import *
from .error import *
from .notify import install_mediaroom_protocol
version = '0.6.3'
avg_line_length: 19.142857 | max_line_length: 47 | alphanum_fraction: 0.753731

hexsha: 4a456bc210d5410287e518416584b5b260be8d2e | size: 288 | ext: py | lang: Python
max_stars: pypesto/profile/__init__.py @ m-philipps/pyPESTO (head 4c30abfca56ba714c302141cd44a9dd366bff4bb, licenses ["BSD-3-Clause"]) | count: null | events: null
max_issues: pypesto/profile/__init__.py @ m-philipps/pyPESTO (head 4c30abfca56ba714c302141cd44a9dd366bff4bb, licenses ["BSD-3-Clause"]) | count: null | events: null
max_forks: pypesto/profile/__init__.py @ m-philipps/pyPESTO (head 4c30abfca56ba714c302141cd44a9dd366bff4bb, licenses ["BSD-3-Clause"]) | count: null | events: null
content:
"""
Profile
=======
"""
from .approximate import approximate_parameter_profile
from .options import ProfileOptions
from .profile import parameter_profile
from .util import calculate_approximate_ci, chi2_quantile_to_ratio
from .validation_intervals import validation_profile_significance
avg_line_length: 26.181818 | max_line_length: 66 | alphanum_fraction: 0.84375

hexsha: 8d2c5f4ae68bb563025936a8f042a2c95610c030 | size: 17,259 | ext: py | lang: Python
max_stars: attentive_gan_model/attentive_gan_net.py @ sohaibrabbani/weather-removal-GAN (head 34e277737d4842f1aa3559919b27d3622ab25075, licenses ["MIT"]) | count: 235 | events: 2018-07-31T15:53:33.000Z – 2022-03-28T11:25:00.000Z
max_issues: attentive_gan_model/attentive_gan_net.py @ sohaibrabbani/weather-removal-GAN (head 34e277737d4842f1aa3559919b27d3622ab25075, licenses ["MIT"]) | count: 83 | events: 2018-09-07T04:29:14.000Z – 2022-03-31T17:06:32.000Z
max_forks: attentive_gan_model/attentive_gan_net.py @ sohaibrabbani/weather-removal-GAN (head 34e277737d4842f1aa3559919b27d3622ab25075, licenses ["MIT"]) | count: 88 | events: 2018-08-16T10:55:16.000Z – 2022-03-07T07:19:58.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-6-26 11:45 AM
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/attentive-gan-derainnet
# @File : attentive_gan_net.py
# @IDE: PyCharm
"""
Implementation of the Attentive-Recurrent Network in the Attentive GAN Network
"""
import tensorflow as tf
from attentive_gan_model import cnn_basenet
from attentive_gan_model import vgg16
from config import global_config
CFG = global_config.cfg
class GenerativeNet(cnn_basenet.CNNBaseModel):
"""
    Implementation of the generative network in the Attentive GAN Network (the generator part in Fig. 2)
"""
def __init__(self, phase):
"""
:return:
"""
super(GenerativeNet, self).__init__()
self._vgg_extractor = vgg16.VGG16Encoder(phase='test')
self._train_phase = tf.constant('train', dtype=tf.string)
self._test_phase = tf.constant('test', dtype=tf.string)
self._phase = phase
self._is_training = self._init_phase()
def _init_phase(self):
"""
:return:
"""
return tf.equal(self._phase, self._train_phase)
def build(self, input_tensor):
"""
:param input_tensor:
:return:
"""
pass
def _residual_block(self, input_tensor, name):
"""
        Residual block in the attentive recurrent net
:param input_tensor:
:param name:
:return:
"""
output = None
with tf.variable_scope(name):
inputs = input_tensor
shortcut = input_tensor
for i in range(6):
if i == 0:
inputs = self.conv2d(inputdata=inputs,
out_channel=32,
kernel_size=3,
padding='SAME',
stride=1,
use_bias=True,
name='block_{:d}_conv_1'.format(i))
# TODO reimplement residual block
inputs = self.lrelu(inputdata=inputs, name='block_{:d}_relu_1'.format(i + 1))
output = inputs
shortcut = output
else:
inputs = self.conv2d(inputdata=inputs,
out_channel=32,
kernel_size=3,
padding='SAME',
stride=1,
use_bias=True,
name='block_{:d}_conv_1'.format(i))
inputs = self.lrelu(inputdata=inputs, name='block_{:d}_conv_1'.format(i + 1))
inputs = self.conv2d(inputdata=inputs,
out_channel=32,
kernel_size=3,
padding='SAME',
stride=1,
use_bias=True,
name='block_{:d}_conv_2'.format(i))
inputs = self.lrelu(inputdata=inputs, name='block_{:d}_conv_2'.format(i + 1))
output = self.lrelu(inputdata=tf.add(inputs, shortcut),
name='block_{:d}_add'.format(i))
shortcut = output
return output
def _conv_lstm(self, input_tensor, input_cell_state, name):
"""
        Convolutional LSTM in the attentive recurrent net, see Eq. (3)
:param input_tensor:
:param input_cell_state:
:param name:
:return:
"""
with tf.variable_scope(name):
conv_i = self.conv2d(inputdata=input_tensor, out_channel=32, kernel_size=3, padding='SAME',
stride=1, use_bias=False, name='conv_i')
sigmoid_i = self.sigmoid(inputdata=conv_i, name='sigmoid_i')
conv_f = self.conv2d(inputdata=input_tensor, out_channel=32, kernel_size=3, padding='SAME',
stride=1, use_bias=False, name='conv_f')
sigmoid_f = self.sigmoid(inputdata=conv_f, name='sigmoid_f')
cell_state = \
sigmoid_f * input_cell_state + \
sigmoid_i * tf.nn.tanh(self.conv2d(inputdata=input_tensor,
out_channel=32,
kernel_size=3,
padding='SAME',
stride=1,
use_bias=False,
name='conv_c'))
conv_o = self.conv2d(inputdata=input_tensor, out_channel=32, kernel_size=3, padding='SAME',
stride=1, use_bias=False, name='conv_o')
sigmoid_o = self.sigmoid(inputdata=conv_o, name='sigmoid_o')
lstm_feats = sigmoid_o * tf.nn.tanh(cell_state)
attention_map = self.conv2d(inputdata=lstm_feats, out_channel=1, kernel_size=3, padding='SAME',
stride=1, use_bias=False, name='attention_map')
attention_map = self.sigmoid(inputdata=attention_map)
ret = {
'attention_map': attention_map,
'cell_state': cell_state,
'lstm_feats': lstm_feats
}
return ret
def build_attentive_rnn(self, input_tensor, name, reuse=False):
"""
        Attentive recurrent part of the generator, mainly used to locate the attention regions
:param input_tensor:
:param name:
:param reuse:
:return:
"""
[batch_size, tensor_h, tensor_w, _] = input_tensor.get_shape().as_list()
with tf.variable_scope(name, reuse=reuse):
init_attention_map = tf.constant(0.5, dtype=tf.float32,
shape=[batch_size, tensor_h, tensor_w, 1])
init_cell_state = tf.constant(0.0, dtype=tf.float32,
shape=[batch_size, tensor_h, tensor_w, 32])
init_lstm_feats = tf.constant(0.0, dtype=tf.float32,
shape=[batch_size, tensor_h, tensor_w, 32])
attention_map_list = []
for i in range(4):
attention_input = tf.concat((input_tensor, init_attention_map), axis=-1)
conv_feats = self._residual_block(input_tensor=attention_input,
name='residual_block_{:d}'.format(i + 1))
lstm_ret = self._conv_lstm(input_tensor=conv_feats,
input_cell_state=init_cell_state,
name='conv_lstm_block_{:d}'.format(i + 1))
init_attention_map = lstm_ret['attention_map']
init_cell_state = lstm_ret['cell_state']
init_lstm_feats = lstm_ret['lstm_feats']
attention_map_list.append(lstm_ret['attention_map'])
ret = {
'final_attention_map': init_attention_map,
'final_lstm_feats': init_lstm_feats,
'attention_map_list': attention_map_list
}
return ret
def compute_attentive_rnn_loss(self, input_tensor, label_tensor, name, reuse=False):
"""
        Compute the attentive RNN loss
:param input_tensor:
:param label_tensor:
:param name:
:param reuse:
:return:
"""
with tf.variable_scope(name, reuse=reuse):
inference_ret = self.build_attentive_rnn(input_tensor=input_tensor,
name='attentive_inference')
loss = tf.constant(0.0, tf.float32)
n = len(inference_ret['attention_map_list'])
for index, attention_map in enumerate(inference_ret['attention_map_list']):
mse_loss = tf.pow(0.8, n - index + 1) * \
tf.losses.mean_squared_error(labels=label_tensor,
predictions=attention_map)
loss = tf.add(loss, mse_loss)
return loss, inference_ret['final_attention_map']
def build_autoencoder(self, input_tensor, name, reuse=False):
"""
        Autoencoder part of the generator, responsible for capturing image context information
:param input_tensor:
:param name:
:param reuse:
:return:
"""
with tf.variable_scope(name, reuse=reuse):
conv_1 = self.conv2d(inputdata=input_tensor, out_channel=64, kernel_size=5,
padding='SAME',
stride=1, use_bias=False, name='conv_1')
relu_1 = self.lrelu(inputdata=conv_1, name='relu_1')
conv_2 = self.conv2d(inputdata=relu_1, out_channel=128, kernel_size=3,
padding='SAME',
stride=2, use_bias=False, name='conv_2')
relu_2 = self.lrelu(inputdata=conv_2, name='relu_2')
conv_3 = self.conv2d(inputdata=relu_2, out_channel=128, kernel_size=3,
padding='SAME',
stride=1, use_bias=False, name='conv_3')
relu_3 = self.lrelu(inputdata=conv_3, name='relu_3')
conv_4 = self.conv2d(inputdata=relu_3, out_channel=128, kernel_size=3,
padding='SAME',
stride=2, use_bias=False, name='conv_4')
relu_4 = self.lrelu(inputdata=conv_4, name='relu_4')
conv_5 = self.conv2d(inputdata=relu_4, out_channel=256, kernel_size=3,
padding='SAME',
stride=1, use_bias=False, name='conv_5')
relu_5 = self.lrelu(inputdata=conv_5, name='relu_5')
conv_6 = self.conv2d(inputdata=relu_5, out_channel=256, kernel_size=3,
padding='SAME',
stride=1, use_bias=False, name='conv_6')
relu_6 = self.lrelu(inputdata=conv_6, name='relu_6')
dia_conv1 = self.dilation_conv(input_tensor=relu_6, k_size=3, out_dims=256, rate=2,
padding='SAME', use_bias=False, name='dia_conv_1')
relu_7 = self.lrelu(dia_conv1, name='relu_7')
dia_conv2 = self.dilation_conv(input_tensor=relu_7, k_size=3, out_dims=256, rate=4,
padding='SAME', use_bias=False, name='dia_conv_2')
relu_8 = self.lrelu(dia_conv2, name='relu_8')
dia_conv3 = self.dilation_conv(input_tensor=relu_8, k_size=3, out_dims=256, rate=8,
padding='SAME', use_bias=False, name='dia_conv_3')
relu_9 = self.lrelu(dia_conv3, name='relu_9')
dia_conv4 = self.dilation_conv(input_tensor=relu_9, k_size=3, out_dims=256, rate=16,
padding='SAME', use_bias=False, name='dia_conv_4')
relu_10 = self.lrelu(dia_conv4, name='relu_10')
conv_7 = self.conv2d(inputdata=relu_10, out_channel=256, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='conv_7')
relu_11 = self.lrelu(inputdata=conv_7, name='relu_11')
conv_8 = self.conv2d(inputdata=relu_11, out_channel=256, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='conv_8')
relu_12 = self.lrelu(inputdata=conv_8, name='relu_12')
deconv_1 = self.deconv2d(inputdata=relu_12, out_channel=128, kernel_size=4,
stride=2, padding='SAME', use_bias=False, name='deconv_1')
avg_pool_1 = self.avgpooling(inputdata=deconv_1, kernel_size=2, stride=1, padding='SAME',
name='avg_pool_1')
relu_13 = self.lrelu(inputdata=avg_pool_1, name='relu_13')
conv_9 = self.conv2d(inputdata=tf.add(relu_13, relu_3), out_channel=128, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='conv_9')
relu_14 = self.lrelu(inputdata=conv_9, name='relu_14')
deconv_2 = self.deconv2d(inputdata=relu_14, out_channel=64, kernel_size=4,
stride=2, padding='SAME', use_bias=False, name='deconv_2')
avg_pool_2 = self.avgpooling(inputdata=deconv_2, kernel_size=2, stride=1, padding='SAME',
name='avg_pool_2')
relu_15 = self.lrelu(inputdata=avg_pool_2, name='relu_15')
conv_10 = self.conv2d(inputdata=tf.add(relu_15, relu_1), out_channel=32, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='conv_10')
relu_16 = self.lrelu(inputdata=conv_10, name='relu_16')
skip_output_1 = self.conv2d(inputdata=relu_12, out_channel=3, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='skip_ouput_1')
skip_output_2 = self.conv2d(inputdata=relu_14, out_channel=3, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='skip_output_2')
skip_output_3 = self.conv2d(inputdata=relu_16, out_channel=3, kernel_size=3,
padding='SAME', stride=1, use_bias=False,
name='skip_output_3')
            # Traditional GANs use a tanh activation on the output layer
skip_output_3 = tf.nn.tanh(skip_output_3, name='skip_output_3_tanh')
ret = {
'skip_1': skip_output_1,
'skip_2': skip_output_2,
'skip_3': skip_output_3
}
return ret
def compute_autoencoder_loss(self, input_tensor, label_tensor, name, reuse=False):
"""
        Compute the autoencoder loss
:param input_tensor:
:param label_tensor:
:param name:
:param reuse:
:return:
"""
[_, ori_height, ori_width, _] = label_tensor.get_shape().as_list()
label_tensor_ori = label_tensor
label_tensor_resize_2 = tf.image.resize_bilinear(images=label_tensor,
size=(int(ori_height / 2), int(ori_width / 2)))
label_tensor_resize_4 = tf.image.resize_bilinear(images=label_tensor,
size=(int(ori_height / 4), int(ori_width / 4)))
label_list = [label_tensor_resize_4, label_tensor_resize_2, label_tensor_ori]
lambda_i = [0.6, 0.8, 1.0]
with tf.variable_scope(name, reuse=reuse):
            # Compute lm_loss (see Eq. (5))
lm_loss = tf.constant(0.0, tf.float32, name="lm_loss")
inference_ret = self.build_autoencoder(input_tensor=input_tensor, name='autoencoder_inference')
output_list = [inference_ret['skip_1'], inference_ret['skip_2'], inference_ret['skip_3']]
for index, output in enumerate(output_list):
mse_loss = tf.losses.mean_squared_error(output, label_list[index]) * lambda_i[index]
mse_loss = tf.identity(mse_loss, name='mse_loss')
lm_loss = tf.add(lm_loss, mse_loss)
            # Compute lp_loss (see Eq. (6))
src_vgg_feats = self._vgg_extractor.extract_feats(input_tensor=label_tensor,
name='vgg_feats',
reuse=False)
pred_vgg_feats = self._vgg_extractor.extract_feats(input_tensor=output_list[-1],
name='vgg_feats',
reuse=True)
lp_losses = []
for index, feats in enumerate(src_vgg_feats):
lp_losses.append(tf.losses.mean_squared_error(src_vgg_feats[index], pred_vgg_feats[index]))
lp_loss = tf.reduce_mean(lp_losses, name='lp_loss')
loss = tf.add(lm_loss, lp_loss, name='autoencoder_loss')
return loss, inference_ret['skip_3']
if __name__ == '__main__':
input_image = tf.placeholder(dtype=tf.float32, shape=[1, 256, 256, 3])
auto_label_image = tf.placeholder(dtype=tf.float32, shape=[1, 256, 256, 3])
rnn_label_image = tf.placeholder(dtype=tf.float32, shape=[1, 256, 256, 1])
net = GenerativeNet(phase=tf.constant('train', tf.string))
rnn_loss = net.compute_attentive_rnn_loss(input_image, rnn_label_image, name='rnn_loss')
autoencoder_loss = net.compute_autoencoder_loss(input_image, auto_label_image, name='autoencoder_loss')
for vv in tf.trainable_variables():
print(vv.name)
avg_line_length: 46.395161 | max_line_length: 107 | alphanum_fraction: 0.522974

hexsha: 11de698e14944b1b7d21742096c2130579c4d3b3 | size: 6,777 | ext: py | lang: Python
max_stars: src/parsing/grammar.py @ iwasingh/Wikoogle (head ef39b4f96347c9899721ea78403d8db84e0c2b82, licenses ["MIT"]) | count: 8 | events: 2020-06-27T08:56:30.000Z – 2021-09-29T21:31:24.000Z
max_issues: src/parsing/grammar.py @ iwasingh/Wikoogle (head ef39b4f96347c9899721ea78403d8db84e0c2b82, licenses ["MIT"]) | count: 2 | events: 2020-09-03T15:52:17.000Z – 2021-03-31T19:53:56.000Z
max_forks: src/parsing/grammar.py @ iwasingh/Wikoogle (head ef39b4f96347c9899721ea78403d8db84e0c2b82, licenses ["MIT"]) | count: 1 | events: 2020-06-29T15:50:51.000Z – 2020-06-29T15:50:51.000Z
content:
import logging
import re
import parsing.parser as p
from .combinators import pipe, expect, extract, seq, sor, rep, ParseError
from .symbols import Template, Text, Link, Heading, Heading6, Heading5, Heading4, Heading3, Comment, Bold, \
ItalicAndBold, Italic
from .utils import recursive
# import src.parsing.lexer as l
logger = logging.getLogger('Grammar')
# TODO move symbols in a own file
class Grammar:
"""Handles the grammar on which the parser will depend upon
Each production rule is described in the EBNF or ABNF form and might be simplified from the original one
You can find the grammar for Wikimedia in the ABNF form here(https://www.mediawiki.org/wiki/Preprocessor_ABNF).
An internal grammar definition might be used because for index purpose some rules are useless
"""
rules = {}
def __init__(self):
pass
# Add additional rules
def rule(self, rule):
pass
def expression(self):
"""
Wikimedia primary expression
ε : = text
expression := template
| heading_2
| link
| ε
:param parser:
:return:
"""
# sor(*Grammar.rules.values())
return sor(
self.template,
self.link,
self.headings,
self.epsilon
)
@staticmethod
def __expression():
return sor(
Grammar.template,
Grammar.link,
Grammar.headings,
Grammar.epsilon
)
@staticmethod
def template(parser):
"""Template grammar
Wikimedia ABNF
template = "{{", title, { "|", part }, "}}" ;
part = [ name, "=" ], value ;
title = text ;
------
Internal
text := ε
template := '{{' text '}}'
Templates are used to call functions and do some particular formatting
Only the title might be necessary, this is why the template is simplified with a simple text inside brackets,
therefore there is no recursion.
:param parser:
:return:
"""
result = pipe(parser,
seq(expect(Template.start), Grammar.epsilon, expect(Template.end)),
extract)
if result:
return p.Node(p.TemplateP(result.value))
return None
# return TemplateT.parse(parser)
@staticmethod
def link(parser):
"""Link grammar
Wikimedia EBNF
start link = "[[";
end link = "]]";
internal link = start link, full pagename, ["|", label], end link,
---
Internal
pagename := ε
expression := template
| link
| ε
link := '[[' pagename, { expression } ']]'
The link contain the page name, and 0 or more repetitions of the expression ["|", label]. That is simplified with
an expression that can by any one of the wikimedia non-terminals (text, template, link for now)
Watch out left recursion (a link can contain a link)
TODO add external link too, https://en.wikipedia.org/wiki/Help:Link#External_links
:param parser:
:return:
"""
# expression = sor(expect(Link.end), rep(sor(Grammar.epsilon, Grammar.template, Grammar.link), Link.end))
def extractor(arr):
return (lambda _, c, children, __: (c, children))(*arr)
result = pipe(parser,
seq(expect(Link.start),
Grammar.epsilon,
rep(sor(Grammar.epsilon, Grammar.template, Grammar.link), Link.end),
expect(Link.end)),
extractor)
if result:
(content, nodes) = result
node = p.LinkNode(p.LinkP(content.value))
for n in nodes:
node.add(n)
return node
return None
@staticmethod
def headings(parser):
""" Heading
Wikimedia EBNF
header end = [whitespace], line break;
header6 = line break, "======", [whitespace], text, [whitespace], "======", header end;
header5 = line break, "=====", [whitespace], text, [whitespace], "=====", header end;
header4 = line break, "====", [whitespace], text, [whitespace], "====", header end;
header3 = line break, "===", [whitespace], text, [whitespace], "===", header end;
header2 = line break, "==", [whitespace], text, [whitespace], "==", header end;
---
Internal EBNF
header6 = "======", text, "======";
header5 = "=====", text, "=====";
header4 = "====", text, "====";
header3 = "===", text, "===";
header2 = "==", text, "==";
NOTE: Linebreak is one of the ignored character in the lexer, i should consider them TODO
"""
precedence = [
Heading6,
Heading5,
Heading4,
Heading3,
Heading
]
def extractor(r):
_, arr, __ = r
return arr[0]
try:
result = pipe(parser, sor(
*[seq(expect(i.start), rep(sor(Grammar.epsilon, Grammar.template, Grammar.link), i.end), expect(i.end))
for i in precedence]),
extractor)
except ParseError as e:
raise e
if result:
return p.Node(p.HeadingP(result.value))
return None
@staticmethod
def text(parser):
return Grammar.epsilon(parser)
@staticmethod
def epsilon(parser):
"""Basic epsilon that consume the token and proceed aka Text for now.
Maybe i'll further extend this to handle cases like left-recursion
:param parser:
:return:
"""
result = expect(Text.start)(parser)
if result:
return p.Node(p.TextP(result.text))
return None
@staticmethod
def linebreak(parser):
pass
@staticmethod
def table(parser):
"""Table grammar
Tables are threatened as text, hence will be indexed including formatting attributes not
useful for indexing purpose
"""
pass
@staticmethod
def comment(parser):
result = pipe(parser,
seq(expect(Comment.start), Grammar.epsilon, expect(Comment.end)),
extract)
if result:
return p.Node(p.CommentP(result.value))
return None
# @staticmethod
# def formatting(parser):
# result = pipe(parser, sor())
avg_line_length: 29.986726 | max_line_length: 121 | alphanum_fraction: 0.531504

hexsha: 69bc1b272f2322bb882336928245a5967af4a226 | size: 1,502 | ext: py | lang: Python
max_stars: day04/main.py @ carterbourette/advent-of-code (head b031ea923a4f27487ffb43acdd5bef228c3dfa42, licenses ["MIT"]) | count: 1 | events: 2020-12-05T20:54:08.000Z – 2020-12-05T20:54:08.000Z
max_issues: day04/main.py @ carterbourette/advent-of-code (head b031ea923a4f27487ffb43acdd5bef228c3dfa42, licenses ["MIT"]) | count: null | events: null
max_forks: day04/main.py @ carterbourette/advent-of-code (head b031ea923a4f27487ffb43acdd5bef228c3dfa42, licenses ["MIT"]) | count: null | events: null
content:
import utility
"""Day 04: Passport Processing"""
inputs = utility.inputs(
parse=lambda line: [ field for field in line.split() ],
pre_process='\n\n'
)
def valid_height(x):
is_cm = x.endswith('cm') and 150 <= int(x[:-2]) <= 193
is_in = x.endswith('in') and 59 <= int(x[:-2]) <= 76
return is_cm or is_in
FIELDS = {
'byr': lambda x: 1920 <= int(x) <= 2002,
'iyr': lambda x: 2010 <= int(x) <= 2020,
'eyr': lambda x: 2020 <= int(x) <= 2030,
'hgt': valid_height,
'hcl': lambda x: x[0] == '#' and len(x) == 7 and all(c.isdigit() or c in 'abcdef' for c in x[1:]),
'ecl': lambda x: x in ('amb','blu','brn','gry','grn','hzl','oth'),
'pid': lambda x: len(x) == 9 and all(c.isdigit() for c in x)
}
REQUIRED = set(FIELDS)
def parse():
records = []
for line in inputs:
record = {}
for field in line:
key, val = field.split(':')
record[key] = val
records.append(record)
return records
def part1():
records = parse()
valid = sum(1 for record in records if set(record.keys()) >= REQUIRED)
return utility.solution({ 'valid': valid }, test=2)
def part2():
records = parse()
valid = 0
for record in records:
is_super = set(record.keys()) >= REQUIRED
if is_super and all(validator(record[field]) for field, validator in FIELDS.items()):
valid += 1
return utility.solution({ 'valid': valid })
if __name__ == '__main__':
utility.cli()
avg_line_length: 25.033333 | max_line_length: 102 | alphanum_fraction: 0.563249

hexsha: c76be2cf1a62170975c718e279b9342f8d6c0d86 | size: 5,889 | ext: py | lang: Python
max_stars: loaders/base_loader.py @ agis85/spatial_factorisation (head 233d72511ffb52f52214a68f1c996555345991d0, licenses ["MIT"]) | count: 15 | events: 2019-03-08T13:42:28.000Z – 2021-05-06T12:08:24.000Z
max_issues: loaders/base_loader.py @ agis85/spatial_factorisation (head 233d72511ffb52f52214a68f1c996555345991d0, licenses ["MIT"]) | count: null | events: null
max_forks: loaders/base_loader.py @ agis85/spatial_factorisation (head 233d72511ffb52f52214a68f1c996555345991d0, licenses ["MIT"]) | count: 3 | events: 2019-07-07T14:00:20.000Z – 2020-10-07T17:11:00.000Z
content:
import os
import numpy as np
from abc import abstractmethod
class Loader(object):
"""
Abstract class defining the behaviour of loaders for different datasets.
"""
def __init__(self):
self.num_masks = 0
self.num_volumes = 0
self.input_shape = (None, None, 1)
self.data_folder = None
self.volumes = sorted(self.splits()[0]['training'] +
self.splits()[0]['validation'] +
self.splits()[0]['test'])
self.log = None
@abstractmethod
def splits(self):
"""
:return: an array of splits into validation, test and train indices
"""
pass
@abstractmethod
def load_labelled_data(self, split, split_type, modality, normalise=True, value_crop=True, downsample=1):
"""
Load labelled data from saved numpy arrays.
Assumes a naming convention of numpy arrays as:
<dataset_name>_images.npz, <dataset_name>_masks_lv.npz, <dataset_name>_masks_myo.npz etc.
If numpy arrays are not found, then data is loaded from sources and saved in numpy arrays.
:param split: the split number, e.g. 0, 1
:param split_type: the split type, e.g. training, validation, test, all (for all data)
:param modality: modality to load if the dataset has multimodal data
:param normalise: True/False: normalise images to [-1, 1]
:param value_crop: True/False: crop values between 5-95 percentiles
:param downsample: downsample image ratio - used for for testing
:return: a Data object containing the loaded data
"""
pass
@abstractmethod
def load_unlabelled_data(self, split, split_type, modality='MR', normalise=True, value_crop=True):
"""
Load unlabelled data from saved numpy arrays.
Assumes a naming convention of numpy arrays as ul_<dataset_name>_images.npz
If numpy arrays are not found, then data is loaded from sources and saved in numpy arrays.
:param split: the split number, e.g. 0, 1
:param split_type: the split type, e.g. training, validation, test, all (for all data)
:param modality: modality to load if the dataset has multimodal data
:param normalise: True/False: normalise images to [-1, 1]
:param value_crop: True/False: crop values between 5-95 percentiles
:return: a Data object containing the loaded data
"""
pass
@abstractmethod
def load_all_data(self, split, split_type, modality='MR', normalise=True, value_crop=True):
"""
Load all images (labelled and unlabelled) from saved numpy arrays.
Assumes a naming convention of numpy arrays as all_<dataset_name>_images.npz
If numpy arrays are not found, then data is loaded from sources and saved in numpy arrays.
:param split: the split number, e.g. 0, 1
:param split_type: the split type, e.g. training, validation, test, all (for all data)
:param modality: modality to load if the dataset has multimodal data
:param normalise: True/False: normalise images to [-1, 1]
:param value_crop: True/False: crop values between 5-95 percentiles
:return: a Data object containing the loaded data
"""
pass
@abstractmethod
def load_raw_labelled_data(self, normalise=True, value_crop=True):
"""
Load raw data, do preprocessing e.g. normalisation, resampling, value cropping etc
:param normalise: True or False to normalise data
:param value_crop: True or False to crop in the 5-95 percentiles or not.
:return: a pair of arrays (images, index)
"""
pass
@abstractmethod
def load_raw_unlabelled_data(self, include_labelled, normalise=True, value_crop=True):
"""
Load raw data, do preprocessing e.g. normalisation, resampling, value cropping etc
:param include_labelled True or False to include labelled images or not
:param normalise: True or False to normalise data
:param value_crop: True or False to crop in the 5-95 percentiles or not.
:return: a pair of arrays (images, index)
"""
pass
def base_load_unlabelled_images(self, dataset, split, split_type, include_labelled, normalise, value_crop):
npz_prefix_type = 'ul_' if not include_labelled else 'all_'
npz_prefix = npz_prefix_type + 'norm_' if normalise else npz_prefix_type + 'unnorm_'
# Load saved numpy array
if os.path.exists(os.path.join(self.data_folder, npz_prefix + dataset + '_images.npz')):
images = np.load(os.path.join(self.data_folder, npz_prefix + dataset + '_images.npz'))['arr_0']
index = np.load(os.path.join(self.data_folder, npz_prefix + dataset + '_index.npz'))['arr_0']
self.log.debug('Loaded compressed ' + dataset + ' unlabelled data of shape ' + str(images.shape))
# Load from source
else:
images, index = self.load_raw_unlabelled_data(include_labelled, normalise, value_crop)
images = np.expand_dims(images, axis=3)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + dataset + '_images'), images)
np.savez_compressed(os.path.join(self.data_folder, npz_prefix + dataset + '_index'), index)
assert split_type in ['training', 'validation', 'test', 'all'], 'Unknown split_type: ' + split_type
if split_type == 'all':
return images, index
volumes = self.splits()[split][split_type]
images = np.concatenate([images[index == v] for v in volumes])
index = np.concatenate([index[index==v] for v in volumes])
return images, index
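# --- Hedged usage sketch (added for illustration; `AcdcLoader` is a hypothetical concrete subclass) ---
# A concrete loader implements splits() and the load_raw_* methods; this base class then
# handles caching to/from the .npz naming convention described in the docstrings above:
#
#     loader = AcdcLoader()  # hypothetical subclass of this base class
#     labelled = loader.load_labelled_data(split=0, split_type='training', modality='MR')
#     images, index = loader.base_load_unlabelled_images('acdc', split=0, split_type='training',
#                                                        include_labelled=False, normalise=True,
#                                                        value_crop=True)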
| 48.669421
| 111
| 0.641875
|
c29f0d4062bbd42e4fecbed6e167798392a23769
| 5,001
|
py
|
Python
|
tensorflow_datasets/core/utils/version.py
|
ChAnYaNG97/datasets
|
0a45e2ea98716d325fc1c5e5494f2575f3bdb908
|
[
"Apache-2.0"
] | 1
|
2020-05-24T21:30:50.000Z
|
2020-05-24T21:30:50.000Z
|
tensorflow_datasets/core/utils/version.py
|
ChAnYaNG97/datasets
|
0a45e2ea98716d325fc1c5e5494f2575f3bdb908
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/utils/version.py
|
ChAnYaNG97/datasets
|
0a45e2ea98716d325fc1c5e5494f2575f3bdb908
|
[
"Apache-2.0"
] | 1
|
2020-04-15T19:20:58.000Z
|
2020-04-15T19:20:58.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version utils."""
import enum
import re
import six
_VERSION_TMPL = (
r"^(?P<major>{v})"
r"\.(?P<minor>{v})"
r"\.(?P<patch>{v})$")
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
class Experiment(enum.Enum):
"""Experiments which can be enabled/disabled on a per version basis.
Experiments are designed to gradually apply changes to datasets while
maintaining backward compatibility with previous versions. All experiments
should eventually be deleted, once used by all versions of all datasets.
Eg:
class Experiment(enum.Enum):
EXP_A = enum.auto() # Short description of experiment.
class MyBuilder(...):
VERSION = tfds.core.Version('1.2.3', experiments={
tfds.core.Experiment.EXP_A: True,
})
"""
# A Dummy experiment, which should NOT be used, except for testing.
DUMMY = 1
class Version(object):
"""Dataset version MAJOR.MINOR.PATCH."""
_DEFAULT_EXPERIMENTS = {
Experiment.DUMMY: False,
}
def __init__(self, version_str, description=None, experiments=None,
tfds_version_to_prepare=None):
"""Version init.
Args:
version_str: string. Eg: "1.2.3".
description: string, a description of what is new in this version.
experiments: dict of experiments. See Experiment.
tfds_version_to_prepare: string, defaults to None. If set, indicates that
current version of TFDS cannot be used to `download_and_prepare` the
dataset, but that TFDS at version {tfds_version_to_prepare} should be
used instead.
"""
if description is not None and not isinstance(description, str):
raise TypeError(
"Description should be a string. Got {}".format(description))
self.description = description
self._experiments = self._DEFAULT_EXPERIMENTS.copy()
self.tfds_version_to_prepare = tfds_version_to_prepare
if experiments:
self._experiments.update(experiments)
self.major, self.minor, self.patch = _str_to_version(version_str)
def implements(self, experiment):
"""Returns True if version implements given experiment."""
return self._experiments[experiment]
def __str__(self):
return "{}.{}.{}".format(*self.tuple)
def __repr__(self) -> str:
return f"{type(self).__name__}(\'{str(self)}\')"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, six.string_types):
return Version(other)
elif isinstance(other, Version):
return other
raise AssertionError("{} (type {}) cannot be compared to version.".format(
other, type(other)))
def __eq__(self, other):
other = self._validate_operand(other)
return self.tuple == other.tuple
def __ne__(self, other):
other = self._validate_operand(other)
return self.tuple != other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self._validate_operand(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self._validate_operand(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self._validate_operand(other)
return self.tuple >= other.tuple
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"])
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v)
for v in [res.group("major"), res.group("minor"), res.group("patch")])
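# --- Added illustration (not part of the upstream module): a minimal, runnable sketch of
# how the Version/Experiment classes defined above behave.
if __name__ == "__main__":
    v = Version("1.2.3", experiments={Experiment.DUMMY: True})
    assert v == Version("1.2.3") and v > "1.2.0"      # comparisons accept str or Version
    assert v.match("1.*.*") and not v.match("2.*.*")  # wildcard matching per component
    assert v.implements(Experiment.DUMMY)             # per-version experiment flags
    print(repr(v), str(v))                            # Version('1.2.3') 1.2.3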
| 32.686275
| 79
| 0.689662
|
f59754d50569c0d113bd4036c76988450cc169ed
| 2,742
|
py
|
Python
|
drf_admin/apps/system/serializers/users.py
|
liu3734/drf_admin
|
f47edff36e761380a36834daa017a3c0808a0505
|
[
"MIT"
] | null | null | null |
drf_admin/apps/system/serializers/users.py
|
liu3734/drf_admin
|
f47edff36e761380a36834daa017a3c0808a0505
|
[
"MIT"
] | null | null | null |
drf_admin/apps/system/serializers/users.py
|
liu3734/drf_admin
|
f47edff36e761380a36834daa017a3c0808a0505
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : users.py
@create : 2020/7/1 22:33
"""
import re
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers
Users = get_user_model()
class UsersSerializer(serializers.ModelSerializer):
"""
用户增删改查序列化器
"""
date_joined = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
department_name = serializers.ReadOnlyField(source='department.name')
roles_list = serializers.ReadOnlyField()
is_superuser = serializers.BooleanField(read_only=True)
class Meta:
model = Users
fields = ['id', 'username', 'name', 'mobile', 'email', 'is_active', 'department', 'department_name',
'date_joined', 'roles', 'roles_list', 'is_superuser']
def validate(self, attrs):
        # Data validation
        if attrs.get('username'):
            if attrs.get('username').isdigit():
                raise serializers.ValidationError('Username cannot consist of digits only')
        if attrs.get('mobile'):
            if not re.match(r'^1[3-9]\d{9}$', attrs.get('mobile')):
                raise serializers.ValidationError('Invalid mobile number format')
if attrs.get('mobile') == '':
attrs['mobile'] = None
return attrs
def create(self, validated_data):
user = super().create(validated_data)
        # Set the default password
user.set_password(settings.DEFAULT_PWD)
user.save()
return user
class UsersPartialSerializer(serializers.ModelSerializer):
"""
用户局部更新(激活/锁定)序列化器
"""
class Meta:
model = Users
fields = ['id', 'is_active']
class ResetPasswordSerializer(serializers.ModelSerializer):
"""
重置密码序列化器
"""
confirm_password = serializers.CharField(write_only=True)
class Meta:
model = Users
fields = ['id', 'password', 'confirm_password']
extra_kwargs = {
'password': {
'write_only': True
}
}
def validate(self, attrs):
        # partial_update: 'required' validation is not enforced on partial updates, so validate the fields manually
password = attrs.get('password')
confirm_password = attrs.get('confirm_password')
if not password:
            raise serializers.ValidationError('The password field is required')
        if not confirm_password:
            raise serializers.ValidationError('The confirm_password field is required')
        if password != confirm_password:
            raise serializers.ValidationError('The two password entries do not match')
return attrs
def save(self, **kwargs):
        # Override save() to hash and store the new password
self.instance.set_password(self.validated_data.get('password'))
self.instance.save()
return self.instance
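# --- Hedged usage sketch (illustration only; the surrounding view/request wiring is assumed) ---
# Resetting a password through ResetPasswordSerializer might look like this inside a view:
#
#     serializer = ResetPasswordSerializer(
#         instance=user,  # `user` is assumed to be a Users instance fetched by the view
#         data={'password': 'NewPass123', 'confirm_password': 'NewPass123'},
#         partial=True)
#     serializer.is_valid(raise_exception=True)  # runs the manual required/match checks above
#     serializer.save()                          # hashes and stores the new password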
| 29.170213
| 108
| 0.619621
|
687d9bdb1bc523d3359e145e4dab5b2deabafbf7
| 1,889
|
py
|
Python
|
tests/01_integration/conftest.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | 8
|
2019-06-02T23:47:38.000Z
|
2021-08-24T07:30:08.000Z
|
tests/01_integration/conftest.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | 39
|
2019-04-09T06:21:56.000Z
|
2022-01-29T10:00:37.000Z
|
tests/01_integration/conftest.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the MIT License
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Fixtures for PrefixListAgent integration tests."""
from __future__ import print_function
import time
import pytest
from rptk_stub import RptkStubProcess
@pytest.fixture(scope="module")
def node():
"""Provide a pyeapi node connected to the local unix socket."""
    last_error = None
    for retry in range(60):
        try:
            import pyeapi
            conn = pyeapi.connect(transport="socket")
            node = pyeapi.client.Node(conn)
            assert node.version
            break
        except Exception as e:
            # Remember the failure so the for-else below can re-raise it:
            # in Python 3 the name `e` is cleared when the except block ends.
            last_error = e
            time.sleep(3)
            continue
    else:
        raise last_error
    return node
@pytest.fixture(scope="module")
def configure_daemon(node):
"""Configure the agent as an EOS ProcMgr daemon."""
agent_config = [
"trace PrefixListAgent-PrefixListAgent setting PrefixList*/*",
"daemon PrefixListAgent",
"exec /root/bin/PrefixListAgent",
"option rptk_endpoint value http://127.0.0.1:8000/",
"option refresh_interval value 10",
"option update_delay value 1",
"no shutdown"
]
node.config(agent_config)
time.sleep(3)
yield
@pytest.fixture(scope="module")
def rptk_stub():
"""Launch a stub version of an rptk web application."""
process = RptkStubProcess()
process.start()
yield
process.terminate()
process.join()
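# --- Hedged example (illustration only) ---
# A minimal integration test consuming these fixtures would live in a sibling test
# module (pytest does not collect tests from conftest.py itself), e.g.:
#
#     def test_node_reachable(node, configure_daemon, rptk_stub):
#         """Smoke-test that EOS answers over the unix socket once the daemon is up."""
#         assert node.version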
| 28.621212
| 79
| 0.67549
|
fd6e6421937d23733c2472458296fa9638ed68b2
| 1,036
|
py
|
Python
|
spotify/v1/me/player/device.py
|
geekonedge/spotify
|
1f4cf733a1fb11ab96259ed1e229b141e5c696f3
|
[
"MIT"
] | 2
|
2018-10-10T08:00:47.000Z
|
2021-10-12T04:15:33.000Z
|
spotify/v1/me/player/device.py
|
geekonedge/spotify
|
1f4cf733a1fb11ab96259ed1e229b141e5c696f3
|
[
"MIT"
] | 2
|
2018-08-31T21:59:47.000Z
|
2018-08-31T22:27:57.000Z
|
spotify/v1/me/player/device.py
|
geekonedge/spotify
|
1f4cf733a1fb11ab96259ed1e229b141e5c696f3
|
[
"MIT"
] | 1
|
2018-08-31T21:18:58.000Z
|
2018-08-31T21:18:58.000Z
|
from spotify.object.device import Device
from spotify.page import Page
from spotify.resource import Resource
class DeviceInstance(Resource):
def __init__(self, version, properties):
super(DeviceInstance, self).__init__(version)
self._device = Device.from_json(properties)
@property
def id(self):
return self._device.id
@property
def is_active(self):
return self._device.is_active
@property
def is_restricted(self):
return self._device.is_restricted
@property
def name(self):
return self._device.name
@property
def type(self):
return self._device.type
@property
def volume_percent(self):
return self._device.volume_percent
class DeviceList(Resource):
def list(self):
response = self.version.request('GET', '/me/player/devices')
return DevicePage(self.version, response.json(), 'devices')
class DevicePage(Page):
@property
def instance_class(self):
return DeviceInstance
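# --- Hedged usage sketch (illustration only) ---
# Assuming `version` is the SDK's API-version handle used to construct resources,
# listing the current user's devices might look like:
#
#     page = DeviceList(version).list()
#     for device in page:  # assumes Page exposes iteration over DeviceInstance items
#         print(device.id, device.name, device.volume_percent)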
| 21.142857
| 68
| 0.677606
|
f1b72354618112bdbca91e745a25256fa84bc6a8
| 6,531
|
py
|
Python
|
tests/test_lastseen.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | null | null | null |
tests/test_lastseen.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | null | null | null |
tests/test_lastseen.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | null | null | null |
import json
import pytest
from kopf.structs.lastseen import LAST_SEEN_ANNOTATION
from kopf.structs.lastseen import has_essence_stored, get_essence
from kopf.structs.lastseen import get_essential_diffs
from kopf.structs.lastseen import retrieve_essence, refresh_essence
def test_annotation_is_fqdn():
assert LAST_SEEN_ANNOTATION.startswith('kopf.zalando.org/')
@pytest.mark.parametrize('expected, body', [
pytest.param(False, {}, id='no-metadata'),
pytest.param(False, {'metadata': {}}, id='no-annotations'),
pytest.param(False, {'metadata': {'annotations': {}}}, id='no-lastseen'),
pytest.param(True, {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: ''}}}, id='present'),
])
def test_has_essence(expected, body):
result = has_essence_stored(body=body)
assert result == expected
def test_get_essence_removes_resource_references():
body = {'apiVersion': 'group/version', 'kind': 'Kind'}
essence = get_essence(body=body)
assert essence == {}
@pytest.mark.parametrize('field', [
'uid',
'name',
'namespace',
'selfLink',
'generation',
'finalizers',
'resourceVersion',
'creationTimestamp',
'deletionTimestamp',
'any-unexpected-field',
])
def test_get_essence_removes_system_fields_and_cleans_parents(field):
body = {'metadata': {field: 'x'}}
essence = get_essence(body=body)
assert essence == {}
@pytest.mark.parametrize('field', [
'uid',
'name',
'namespace',
'selfLink',
'generation',
'finalizers',
'resourceVersion',
'creationTimestamp',
'deletionTimestamp',
'any-unexpected-field',
])
def test_get_essence_removes_system_fields_but_keeps_extra_fields(field):
body = {'metadata': {field: 'x', 'other': 'y'}}
essence = get_essence(body=body, extra_fields=['metadata.other'])
assert essence == {'metadata': {'other': 'y'}}
@pytest.mark.parametrize('annotation', [
pytest.param(LAST_SEEN_ANNOTATION, id='kopf'),
pytest.param('kubectl.kubernetes.io/last-applied-configuration', id='kubectl'),
])
def test_get_essence_removes_garbage_annotations_and_cleans_parents(annotation):
body = {'metadata': {'annotations': {annotation: 'x'}}}
essence = get_essence(body=body)
assert essence == {}
@pytest.mark.parametrize('annotation', [
pytest.param(LAST_SEEN_ANNOTATION, id='kopf'),
pytest.param('kubectl.kubernetes.io/last-applied-configuration', id='kubectl'),
])
def test_get_essence_removes_garbage_annotations_but_keeps_others(annotation):
body = {'metadata': {'annotations': {annotation: 'x', 'other': 'y'}}}
essence = get_essence(body=body)
assert essence == {'metadata': {'annotations': {'other': 'y'}}}
def test_get_essence_removes_status_and_cleans_parents():
body = {'status': {'kopf': {'progress': 'x', 'anything': 'y'}, 'other': 'z'}}
essence = get_essence(body=body)
assert essence == {}
def test_get_essence_removes_status_but_keeps_extra_fields():
body = {'status': {'kopf': {'progress': 'x', 'anything': 'y'}, 'other': 'z'}}
essence = get_essence(body=body, extra_fields=['status.other'])
assert essence == {'status': {'other': 'z'}}
def test_get_essence_clones_body():
body = {'spec': {'depth': {'field': 'x'}}}
essence = get_essence(body=body)
body['spec']['depth']['field'] = 'y'
assert essence is not body
assert essence['spec'] is not body['spec']
assert essence['spec']['depth'] is not body['spec']['depth']
assert essence['spec']['depth']['field'] == 'x'
def test_refresh_essence():
body = {'spec': {'depth': {'field': 'x'}}}
patch = {}
encoded = json.dumps(body) # json formatting can vary across interpreters
refresh_essence(body=body, patch=patch)
assert patch['metadata']['annotations'][LAST_SEEN_ANNOTATION] == encoded
def test_retrieve_essence_when_present():
data = {'spec': {'depth': {'field': 'x'}}}
encoded = json.dumps(data) # json formatting can vary across interpreters
body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}}}
essence = retrieve_essence(body=body)
assert essence == data
def test_retrieve_essence_when_absent():
body = {}
essence = retrieve_essence(body=body)
assert essence is None
def test_essence_changed_detected():
data = {'spec': {'depth': {'field': 'x'}}}
encoded = json.dumps(data) # json formatting can vary across interpreters
body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}}}
old, new, diff = get_essential_diffs(body=body)
assert diff
def test_essence_change_ignored_with_garbage_annotations():
data = {'spec': {'depth': {'field': 'x'}}}
encoded = json.dumps(data) # json formatting can vary across interpreters
body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}},
'spec': {'depth': {'field': 'x'}}}
old, new, diff = get_essential_diffs(body=body)
assert not diff
def test_essence_changed_ignored_with_system_fields():
data = {'spec': {'depth': {'field': 'x'}}}
encoded = json.dumps(data) # json formatting can vary across interpreters
body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded},
'finalizers': ['x', 'y', 'z'],
'generation': 'x',
'resourceVersion': 'x',
'creationTimestamp': 'x',
'deletionTimestamp': 'x',
'any-unexpected-field': 'x',
'uid': 'uid',
},
'status': {'kopf': {'progress': 'x', 'anything': 'y'},
'other': 'x'
},
'spec': {'depth': {'field': 'x'}}}
old, new, diff = get_essential_diffs(body=body)
assert not diff
# This is to ensure it is callable with proper signature.
# For actual tests of diffing, see `/tests/diffs/`.
def test_essence_diff():
data = {'spec': {'depth': {'field': 'x'}}}
encoded = json.dumps(data) # json formatting can vary across interpreters
body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}},
'status': {'x': 'y'},
'spec': {'depth': {'field': 'y'}}}
old, new, diff = get_essential_diffs(body=body, extra_fields=['status.x'])
assert old == {'spec': {'depth': {'field': 'x'}}}
assert new == {'spec': {'depth': {'field': 'y'}}, 'status': {'x': 'y'}}
assert len(diff) == 2 # spec.depth.field & status.x, but the order is not known.
| 36.082873
| 96
| 0.635125
|
ecc9ccb82fd4ae56ac6a14dadaffa13326ca3fac
| 8,594
|
py
|
Python
|
morf-python-api/morf/utils/config.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 14
|
2018-06-27T13:15:46.000Z
|
2021-08-30T08:24:38.000Z
|
morf-python-api/morf/utils/config.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 58
|
2018-02-03T15:31:15.000Z
|
2019-10-15T02:12:05.000Z
|
morf-python-api/morf/utils/config.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 7
|
2018-03-29T14:47:34.000Z
|
2021-06-22T01:34:52.000Z
|
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions for working with (reading writing, modifying) MORF configuration files.
"""
import boto3
import configparser
import fileinput
import json
import logging
import multiprocessing
import os
import re
from morf.utils import get_bucket_from_url, get_key_from_url
from morf.utils.security import generate_md5
def get_config_properties(config_file="config.properties", sections_to_fetch = None):
"""
Returns the list of properties as a dict of key/value pairs in the file config.properties.
:param config_file: filename (string).
    :param sections_to_fetch: names of the sections to fetch properties from (if specified); all sections are returned by default (iterable).
:return: A flat (no sections) Python dictionary of properties.
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
properties = {}
for section in cf.sections():
# only include args section if requested
if (not sections_to_fetch or (section in sections_to_fetch)):
for item in cf.items(section):
properties[item[0]] = item[1]
return properties
def combine_config_files(*args, outfile="config.properties"):
"""
Combine multiple config files into single config file located at outfile.
:param args: names of config files to combine.
:param outfile: pathname to write to.
:return: None
"""
with open(outfile, "w") as fout, fileinput.input(args) as fin:
for line in fin:
fout.write(line)
return
def update_config_fields_in_section(section, config_file="config.properties", **kwargs):
"""
Overwrite (or create, if not exists) fields in section of config_file with values provided according to kwargs.
:param section: section header within config file which contains the field to be modified.
:param kwargs: named parameters, with values, to overwrite.
:param config_file: path to config properties; should be valid ConfigParser file
:return:
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
cf_new = configparser.ConfigParser()
    # Copy the existing contents; use a separate loop variable so the `section`
    # argument (the section to update below) is not shadowed.
    for existing_section in cf.sections():
        for item in cf.items(existing_section):
            try:
                cf_new[existing_section][item[0]] = item[1]
            except KeyError:  # section doesn't exist yet
                cf_new[existing_section] = {}
                cf_new[existing_section][item[0]] = item[1]
    for key, value in kwargs.items():
        try:
            cf_new[section][key] = value
        except KeyError as e:
            print(
                "[ERROR] error updating config file: {}; possibly attempted to update a section that does not exist".format(
                    e))
try:
os.remove(config_file)
with open(config_file, "w") as cfwrite:
cf_new.write(cfwrite)
except Exception as e:
print("[ERROR] error updating config file: {}".format(e))
return
def fetch_data_buckets_from_config(config_file="config.properties", data_section="data",
required_bucket_dir_name='morf-data/'):
"""
    Fetch the buckets from data_section of config_file; raise an error if a path does not exactly match required_bucket_dir_name.
    :param config_file: path to config file.
    :param data_section: section of config file with key-value pairs representing institution names and s3 paths.
    :param required_bucket_dir_name: directory or path that should match ALL values in data_section; if not, an error is raised.
    :return: tuple of buckets to iterate over; no directories are returned because these should be uniform across all of the buckets.
"""
cf = configparser.ConfigParser()
cf.read(config_file)
buckets = []
for item in cf.items(data_section):
item_url = item[1]
bucket = get_bucket_from_url(item_url)
dir = get_key_from_url(item_url)
if dir != required_bucket_dir_name:
msg = "[ERROR]: specified path {} does not match required directory name {}; change name of directories to be consistent or specify the correct directory to check for.".format(
item_url, required_bucket_dir_name)
print(msg)
            raise ValueError(msg)
else:
buckets.append(bucket)
assert len(buckets) >= 1
return tuple(buckets)
class MorfJobConfig:
"""
Job-level configurations; these should remain consistent across entire workflow of a job.
"""
def __init__(self, config_file):
self.type = "morf" # todo: delete this
self.mode = None
self.status = "START"
properties = get_config_properties(config_file)
self.client_args = get_config_properties(config_file, sections_to_fetch="args")
# add properties to class as attributes
for prop in properties.items():
setattr(self, prop[0], prop[1])
# if client_args are specified, add these to job_id to ensure unique
if self.client_args:
self.generate_job_id()
# fetch raw data buckets as list
self.raw_data_buckets = fetch_data_buckets_from_config()
self.generate_morf_id(config_file)
# if maximum number of cores is not specified, set to one less than half of current machine's cores; otherwise cast to int
self.setcores()
def generate_job_id(self):
"""
Generate and set a unique job_id by appending client-supplied arg names and values.
This makes submitting multiple jobs by simply altering the 'args' field much easier for users.
:return: None
"""
new_job_id = self.job_id
for arg_name, arg_value in sorted(self.client_args.items()):
name = re.sub("[./]", "", arg_name)
value = re.sub("[./]", "", arg_value)
new_job_id += '_'.join([name, value])
setattr(self, "job_id", new_job_id)
return
def generate_morf_id(self, config_file):
"""
Generate a unique MORF identifier via hashing of the config file.
:param config_file:
:return:
"""
self.morf_id = generate_md5(config_file)
def check_configurations(self):
# todo: check that all arguments are valid/acceptable
pass
def update_status(self, status):
# todo: check whether status is valid by comparing with allowed values
self.status = status
def update_email_to(self, email_to):
# todo: check if email is valid
self.email_to = email_to
def update_mode(self, mode):
# todo: check whether mode is valid by comparing with allowed values
self.mode = mode
def initialize_s3(self):
# create s3 connection object for communicating with s3
s3obj = boto3.client("s3",
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
return s3obj
def setcores(self):
if not hasattr(self, "max_num_cores"):
n_cores = multiprocessing.cpu_count()
self.max_num_cores = max(n_cores//2 - 1, 1)
else:
n_cores = int(self.max_num_cores)
self.max_num_cores = n_cores
return
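# --- Added illustration (not part of the upstream module): a small, runnable round trip
# through get_config_properties() and update_config_fields_in_section(). The file name,
# section name and keys below are made up for the demo.
if __name__ == "__main__":
    import tempfile
    demo_path = os.path.join(tempfile.mkdtemp(), "demo_config.properties")
    demo_cf = configparser.ConfigParser()
    demo_cf["aws"] = {"aws_access_key_id": "dummy", "job_id": "demo_job"}
    with open(demo_path, "w") as fout:
        demo_cf.write(fout)
    print(get_config_properties(config_file=demo_path))
    update_config_fields_in_section("aws", config_file=demo_path, job_id="demo_job_v2")
    print(get_config_properties(config_file=demo_path, sections_to_fetch=["aws"]))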
| 40.158879
| 188
| 0.671166
|
34b2fc8089b5b117ff1693342a4e180ea477d736
| 452
|
py
|
Python
|
_modules/neutronv2/auto_alloc.py
|
NDPF/salt-formula-neutron
|
758f3350fa541a41174105c92c0b9cceb6951d81
|
[
"Apache-2.0"
] | 3
|
2017-06-30T18:09:44.000Z
|
2017-11-04T18:24:39.000Z
|
_modules/neutronv2/auto_alloc.py
|
NDPF/salt-formula-neutron
|
758f3350fa541a41174105c92c0b9cceb6951d81
|
[
"Apache-2.0"
] | 10
|
2017-02-25T21:39:01.000Z
|
2018-09-19T07:53:46.000Z
|
_modules/neutronv2/auto_alloc.py
|
NDPF/salt-formula-neutron
|
758f3350fa541a41174105c92c0b9cceb6951d81
|
[
"Apache-2.0"
] | 21
|
2017-02-01T18:12:51.000Z
|
2019-04-29T09:29:01.000Z
|
from neutronv2.common import send
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
@send('get')
def auto_alloc_get_details(project_id, **kwargs):
url = '/auto-allocated-topology/{}?{}'.format(
project_id, urlencode(kwargs)
)
return url, {}
@send('delete')
def auto_alloc_delete(project_id, **kwargs):
url = '/auto-allocated-topology/{}'.format(project_id)
return url, {}
| 21.52381
| 58
| 0.688053
|
c5a3ffe5aa46824da8062e9a6b5454f33e456021
| 6,409
|
py
|
Python
|
src/execute_script.py
|
rrhuffy/hybristools
|
1c91ffd929f7a1752ec2c1737325c5fa50a159da
|
[
"MIT"
] | 2
|
2021-03-17T00:16:04.000Z
|
2021-03-20T08:07:21.000Z
|
src/execute_script.py
|
rrhuffy/hybristools
|
1c91ffd929f7a1752ec2c1737325c5fa50a159da
|
[
"MIT"
] | null | null | null |
src/execute_script.py
|
rrhuffy/hybristools
|
1c91ffd929f7a1752ec2c1737325c5fa50a159da
|
[
"MIT"
] | 2
|
2021-03-22T13:53:00.000Z
|
2022-01-07T16:28:43.000Z
|
#!/usr/bin/env python3
import argparse
import logging
import re
import sys
from bs4 import BeautifulSoup
from lib import argparse_helper
from lib import hybris_argparse_helper
from lib import hybris_requests_helper
from lib import logging_helper
from lib import requests_helper
class ScriptExecutionResponse:
def __init__(self, output_text, execution_result, error_message):
self.output_text = output_text
self.execution_result = execution_result
self.error_message = error_message
def __repr__(self):
return f'ScriptExecutionResponse({repr(self.output_text)}, ' \
f'{repr(self.execution_result)}, {repr(self.error_message)})'
def __str__(self):
if self.error_message:
if self.output_text:
# both output and error message available
return f'Output:\n{self.output_text}\nError:\n{self.error_message}'
else:
# only error message available
return self.error_message
if self.output_text and not self.execution_result:
# only output text available
return self.output_text
elif self.execution_result and not self.output_text:
# only execution_result available
return self.execution_result
elif self.output_text and self.execution_result:
# both output and execution result available
return f'Output:\n{self.output_text}\nResult:\n{self.execution_result}'
else:
logging.debug('Neither output nor execution result available')
return ''
def execute_script(script, script_type, rollback, address, user, password, session=None):
# TODO: check if address is set here, because it will fail
if session is None:
session, address = requests_helper.get_session_with_basic_http_auth_and_cleaned_address(address)
credentials = {'user': user, 'password': password}
hybris_requests_helper.log_into_hac_and_return_csrf_or_exit(session, address, credentials)
script_get_result = session.get(address + '/console/scripting/')
script_csrf_token = re.search(r'name="_csrf"\s+value="(.+?)"\s*/>', script_get_result.text).group(1)
form_data = {'script': script, '_csrf': script_csrf_token, 'scriptType': script_type, 'commit': not rollback}
form_data_without_script = {k: v for k, v in form_data.items() if k != 'script'}
logging.debug(f'form_data_without_script: {form_data_without_script}')
logging.debug('...executing...')
execute_address = address + '/console/scripting/execute'
script_post_result = session.post(execute_address, data=form_data)
logging.debug('done, printing results:')
if script_post_result.status_code == 500:
bs = BeautifulSoup(script_post_result.text, 'html.parser')
html = bs.find('textarea').text
number_of_lines_to_show = 20
first_n_lines = '\n'.join(html.strip().split('\n')[0:number_of_lines_to_show])
msg = f'Received HTTP500, printing first {number_of_lines_to_show} lines of result:\n{first_n_lines}'
return ScriptExecutionResponse(None, None, msg)
elif script_post_result.status_code == 504:
msg = (f'Received HTTP504 Gateway Timeout Error after {int(script_post_result.elapsed.total_seconds())}s while '
f'executing POST with script to execute in {execute_address}. '
f'\nAdd loggers to your script and check result in Hybris logs')
return ScriptExecutionResponse(None, None, msg)
result_json = script_post_result.json()
logging.debug(result_json)
if not result_json:
return ScriptExecutionResponse('No result', None, None)
elif result_json.get('stacktraceText', None):
return ScriptExecutionResponse(result_json['outputText'].strip(), None, result_json['stacktraceText'])
else:
return ScriptExecutionResponse(result_json['outputText'].strip(), result_json['executionResult'], None)
def _handle_cli_arguments():
parser = argparse.ArgumentParser('Script that executes given beanshell/groovy script')
hybris_argparse_helper.add_hybris_hac_arguments(parser)
parser.add_argument('script',
help='string with beanshell/groovy file '
'or string with script (use literal \\n for newline) '
'or "-" if piping script')
parser.add_argument('type', help='type of script', choices=['groovy', 'beanshell'])
# TODO: maybe instead of "--parameters 1 2 3 4" accept "1 2 3 4" as last parameters? (what about optional limit?)
parser.add_argument('--parameters', '-p', nargs='*',
help='arguments to put into script by replacing with $1, $2 etc')
parser.add_argument('--rollback', action='store_true', help='Execute script in rollback mode')
logging_helper.add_logging_arguments_to_parser(parser)
args = parser.parse_args()
script = argparse_helper.get_text_from_string_or_file_or_pipe(args.script)
assert script is not None, 'Cannot load script'
if args.parameters:
for i, parameter in enumerate(args.parameters):
parameter_to_replace = f'${i + 1}'
if parameter_to_replace not in script:
print(f'WARN: Unexpected parameter {parameter_to_replace} with value {repr(parameter)}')
script = script.replace(parameter_to_replace, parameter)
next_parameter = f'${len(args.parameters) + 1}'
if next_parameter in script:
print(f"WARN: Probably you should provide additional parameter for replacing with {next_parameter}")
elif '$1' in script:
print("No parameters given, but $1 found in query, probably you've forgotten to add parameter")
logging.debug('Full script:')
logging.debug(script)
return args, script
def main():
logging_helper.run_ipython_on_exception()
args, script = _handle_cli_arguments()
wrapped_execute_script = logging_helper.decorate_method_with_pysnooper_if_needed(execute_script, args.logging_level)
response = wrapped_execute_script(script, args.type, args.rollback, args.address, args.user, args.password)
assert isinstance(response, ScriptExecutionResponse)
logging.debug(f'Response: {repr(response)}')
print(response)
sys.exit(1 if response.error_message else 0)
if __name__ == '__main__':
main()
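# --- Hedged usage notes (illustration only; script values are made up, and the exact
# HAC arguments come from hybris_argparse_helper.add_hybris_hac_arguments) ---
#
#     ./execute_script.py <hac arguments> 'groovy.lang.GroovySystem.version' groovy
#     ./execute_script.py <hac arguments> my_script.groovy groovy --parameters someValue
#
# The process exits with 1 when the HAC reports a stacktrace and 0 otherwise (see main()).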
| 47.125
| 120
| 0.697145
|
a012245c6a67836083b6a8eb4618caba4ce8e40d
| 6,516
|
py
|
Python
|
scripts/exp_rl_discriminator.py
|
TUIlmenauAMS/rl_singing_voice
|
60204c698d48f27b44588c9d6c8dd2c66a13fcd5
|
[
"MIT"
] | 19
|
2020-03-02T19:52:46.000Z
|
2021-12-15T00:38:45.000Z
|
scripts/exp_rl_discriminator.py
|
TUIlmenauAMS/rl_singing_voice
|
60204c698d48f27b44588c9d6c8dd2c66a13fcd5
|
[
"MIT"
] | 3
|
2020-06-28T13:02:16.000Z
|
2021-04-22T03:31:26.000Z
|
scripts/exp_rl_discriminator.py
|
TUIlmenauAMS/rl_singing_voice
|
60204c698d48f27b44588c9d6c8dd2c66a13fcd5
|
[
"MIT"
] | 3
|
2021-01-19T07:44:40.000Z
|
2021-12-15T00:38:25.000Z
|
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'Fraunhofer IDMT'
# imports
import numpy as np
import torch
from nn_modules import losses
from tools import helpers, visualize, nn_loaders
from settings.rl_disc_experiment_settings import exp_settings
from torch.distributions import Normal
def perform_frontend_training():
print('ID: ' + exp_settings['exp_id'])
# Instantiating data handler
io_dealer = helpers.DataIO(exp_settings=exp_settings)
# Number of file sets
num_of_sets = exp_settings['num_of_multitracks']//exp_settings['set_size']
    # Initialize modules
if exp_settings['visualize']:
win_viz, win_viz_b = visualize.init_visdom() # Web loss plotting
analysis, synthesis = nn_loaders.build_frontend_model(flag='training', exp_settings=exp_settings)
disc = nn_loaders.build_discriminator(flag='training', exp_settings=exp_settings)
sigmoid = torch.nn.Sigmoid()
# Expected shapes
data_shape = (exp_settings['batch_size'], exp_settings['d_p_length'] * exp_settings['fs'])
noise_sampler = Normal(torch.zeros(data_shape), torch.ones(data_shape)*exp_settings['noise_scalar'])
# Initialize optimizer and add the parameters that will be updated
parameters_list = list(analysis.parameters()) + list(synthesis.parameters()) + list(disc.parameters())
optimizer = torch.optim.Adam(parameters_list, lr=exp_settings['learning_rate'])
# Start of the training
batch_indx = 0
for epoch in range(1, exp_settings['epochs'] + 1):
for file_set in range(1, num_of_sets + 1):
# Load a sub-set of the recordings
_, vox, bkg = io_dealer.get_data(file_set, exp_settings['set_size'], monaural=exp_settings['monaural'])
# Create batches
vox = io_dealer.gimme_batches(vox)
bkg = io_dealer.gimme_batches(bkg)
# Compute the total number of batches contained in this sub-set
num_batches = vox.shape[0] // exp_settings['batch_size']
# Compute permutations for random shuffling
perm_in_vox = np.random.permutation(vox.shape[0])
perm_in_bkg = np.random.permutation(bkg.shape[0])
for batch in range(num_batches):
shuf_ind_vox = perm_in_vox[batch * exp_settings['batch_size']: (batch + 1) * exp_settings['batch_size']]
shuf_ind_bkg = perm_in_bkg[batch * exp_settings['batch_size']: (batch + 1) * exp_settings['batch_size']]
vox_tr_batch = io_dealer.batches_from_numpy(vox[shuf_ind_vox, :])
bkg_tr_batch = io_dealer.batches_from_numpy(bkg[shuf_ind_bkg, :])
vox_var = torch.autograd.Variable(vox_tr_batch, requires_grad=False)
bkg_var = torch.autograd.Variable(bkg_tr_batch, requires_grad=False)
mix_var = torch.autograd.Variable(vox_tr_batch + bkg_tr_batch, requires_grad=False)
# Sample noise
noise = torch.autograd.Variable(noise_sampler.sample().cuda().float(), requires_grad=False)
# 0 Mean
vox_var -= vox_var.mean()
bkg_var -= bkg_tr_batch.mean()
mix_var -= mix_var.mean()
# Target source forward pass
vox_coeff = analysis.forward(vox_var + noise)
waveform = synthesis.forward(vox_coeff, use_sorting=exp_settings['dict_sorting'])
# Mixture and Background signals forward pass
mix_coeff = analysis.forward(mix_var)
bkg_coeff = analysis.forward(bkg_var)
# Loss functions
rec_loss = losses.neg_snr(vox_var, waveform)
smt_loss = exp_settings['lambda_reg'] * losses.tot_variation_2d(mix_coeff)
loss = rec_loss + smt_loss
# Optimize for reconstruction & smoothness
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
# Optimize with discriminator
# Remove silent frames
c_loud_x = (10. * (vox_tr_batch.norm(2., dim=1, keepdim=True).log10())).data.cpu().numpy()
# Which segments are below the threshold?
loud_locs = np.where(c_loud_x > exp_settings['loudness_threshold'])[0]
vox_coeff = vox_coeff[loud_locs]
if vox_coeff.size(0) > 2:
# Make sure we are getting unmatched pairs
bkg_coeff = bkg_coeff[loud_locs]
vox_coeff_shf = vox_coeff[np.random.permutation(vox_coeff.size(0))]
# Sample from discriminator
y_neg = sigmoid(disc.forward(vox_coeff, bkg_coeff))
y_pos = sigmoid(disc.forward(vox_coeff, vox_coeff_shf))
# Compute discriminator loss
disc_loss = losses.bce(y_pos, y_neg)
# Optimize the discriminator
optimizer.zero_grad()
disc_loss.backward()
optimizer.step()
else:
pass
if exp_settings['visualize']:
# Visualization
win_viz = visualize.viz.line(X=np.arange(batch_indx, batch_indx + 1),
Y=np.reshape(rec_loss.item(), (1,)),
win=win_viz, update='append')
win_viz_b = visualize.viz.line(X=np.arange(batch_indx, batch_indx + 1),
Y=np.reshape(disc_loss.item(), (1,)),
win=win_viz_b, update='append')
batch_indx += 1
if not torch.isnan(loss) and not torch.isnan(disc_loss):
print('--- Saving Model ---')
torch.save(analysis.state_dict(), 'results/analysis_' + exp_settings['exp_id'] + '.pytorch')
torch.save(synthesis.state_dict(), 'results/synthesis_' + exp_settings['exp_id'] + '.pytorch')
torch.save(disc.state_dict(), 'results/disc_' + exp_settings['exp_id'] + '.pytorch')
else:
break
return None
if __name__ == "__main__":
np.random.seed(218)
torch.manual_seed(218)
torch.cuda.manual_seed(218)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Training
perform_frontend_training()
# EOF
| 43.152318
| 120
| 0.601443
|
86ca517bb4ccb6ef7a5494f4ec2440ec231905b4
| 26,306
|
py
|
Python
|
notebooks/__code/bragg_edge/bragg_edge_sample_and_powder.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/bragg_edge/bragg_edge_sample_and_powder.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/bragg_edge/bragg_edge_sample_and_powder.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import os
import glob
from pathlib import Path
from IPython.core.display import HTML
from IPython.display import display
import numpy as np
from plotly.offline import iplot
import plotly.graph_objs as go
from ipywidgets import widgets
import logging
from neutronbraggedge.experiment_handler import *
from NeuNorm.normalization import Normalization
from NeuNorm.roi import ROI
from __code import file_handler
from __code.bragg_edge.bragg_edge import BraggEdge as BraggEdgeParent
from __code.bragg_edge.bragg_edge import Interface
from __code.file_folder_browser import FileFolderBrowser
from __code import ipywe
from __code._utilities.file import get_full_home_file_name
LOG_FILE_NAME = ".bragg_edge_normalization.log"
class BraggEdge(BraggEdgeParent):
o_interface = None
select_ob_widget = None
def __init__(self, working_dir="./"):
super(BraggEdge, self).__init__(working_dir=working_dir)
self.log_file_name = get_full_home_file_name(LOG_FILE_NAME)
logging.basicConfig(filename=self.log_file_name,
filemode='w',
format='[%(levelname)s] - %(asctime)s - %(message)s',
level=logging.INFO)
logging.info("*** Starting a new session ***")
def load_data(self, folder_selected):
logging.info(f"Loading data from {folder_selected}")
self.o_norm = Normalization()
self.load_files(data_type='sample', folder=folder_selected)
# define time spectra file
folder = os.path.dirname(self.o_norm.data['sample']['file_name'][0])
self.list_files = self.o_norm.data['sample']['file_name']
self.data = self.o_norm.data['sample']['data']
self.data_folder_name = os.path.basename(folder)
spectra_file = glob.glob(os.path.join(folder, '*_Spectra.txt'))
logging.info(f"> looking for spectra file: {spectra_file}")
if spectra_file:
logging.info(f"-> spectra file FOUND!")
self.spectra_file = spectra_file[0]
display(HTML('<span style="font-size: 15px; color:blue"> Spectra File automatically located: ' + \
self.spectra_file + '</span>'))
else:
# ask for spectra file
logging.info(f"-> spectra file NOT FOUND! Asking user to select time spectra file")
self.select_time_spectra_file()
def select_time_spectra_file(self):
self.working_dir = os.path.dirname(self.list_files[0])
self.time_spectra_ui = ipywe.fileselector.FileSelectorPanel(instruction='Select Time Spectra File ...',
start_dir=self.working_dir,
next=self.save_time_spectra,
filters={'spectra_file': "_Spectra.txt"},
multiple=False)
self.time_spectra_ui.show()
self.cancel_button = widgets.Button(description="or Do Not Select any Time Spectra",
button_style="info",
layout=widgets.Layout(width='100%'))
display(self.cancel_button)
self.cancel_button.on_click(self.cancel_time_spectra_selection)
def save_time_spectra(self, file):
BraggEdgeParent.save_time_spectra(self, file)
logging.info(f"Time spectra file loaded: {file}")
self.cancel_button.close()
def cancel_time_spectra_selection(self, value):
logging.info(f"User cancel loading time spectra!")
self.time_spectra_ui.remove()
self.cancel_button.close()
display(HTML('<span style="font-size: 20px; color:blue">NO Spectra File loaded! </span>'))
def load_files(self, data_type='sample', folder=None):
self.starting_dir = os.path.dirname(folder)
if data_type == 'sample':
self.data_folder_name = os.path.basename(folder)
list_files = glob.glob(os.path.join(folder, '*.fits'))
if list_files == []:
list_files = glob.glob(os.path.join(folder, '*.tif*'))
else: # fits
# keep only files of interest
list_files = [file for file in list_files if not "_SummedImg.fits" in file]
list_files = [file for file in list_files if ".fits" in file]
# sort list of files
list_files.sort()
logging.info(f"load files:")
logging.info(f"-> data type: {data_type}")
logging.info(f"-> nbr of files: {len(list_files)}")
o_norm = Normalization()
o_norm.load(file=list_files, notebook=True, data_type=data_type)
if data_type == 'sample':
self.o_norm = o_norm
elif data_type == 'ob':
self.o_norm.data['ob'] = o_norm.data['ob']
display(HTML('<span style="font-size: 15px; color:blue">' + str(len(list_files)) + \
' files have been loaded as ' + data_type + '</span>'))
def get_nbr_of_images_to_use_in_preview(self):
nbr_images = len(self.o_norm.data['sample']['data'])
        init_value = int(nbr_images / 10)
if init_value == 0:
init_value = 1
return init_value
def normalization_settings_widgets(self):
# with ob
## button
self.select_ob_widget = widgets.Button(description="Select OB ...",
button_style="success",
layout=widgets.Layout(width="100%"))
self.select_ob_widget.on_click(self.select_ob_folder)
## space
spacer = widgets.HTML(value="<hr>")
## nbr of images to use
nbr_images_to_use_label = widgets.Label("Nbr of images to use in preview",
layout=widgets.Layout(width="20%"))
nbr_of_images_to_use_in_preview = self.get_nbr_of_images_to_use_in_preview()
self.nbr_images_slider_with_ob = widgets.IntSlider(min=2,
max=len(self.list_files),
value=nbr_of_images_to_use_in_preview,
layout=widgets.Layout(width="80%"))
hbox_1 = widgets.HBox([nbr_images_to_use_label, self.nbr_images_slider_with_ob])
self.select_roi_widget_with_ob = widgets.Button(description="OPTIONAL: Select Region of interest away from "
"sample "
"to "
"improve normalization",
layout=widgets.Layout(width="100%"))
self.select_roi_widget_with_ob.on_click(self.select_roi_with_ob)
vbox_with_ob = widgets.VBox([self.select_ob_widget,
spacer,
hbox_1,
spacer,
self.select_roi_widget_with_ob,
])
# without ob
## nbr of images to use
self.nbr_images_slider_without_ob = widgets.IntSlider(min=2,
max=len(self.list_files),
value=nbr_of_images_to_use_in_preview,
layout=widgets.Layout(width="80%"))
hbox_without_ob = widgets.HBox([nbr_images_to_use_label, self.nbr_images_slider_without_ob])
select_roi_widget_without_ob = widgets.Button(description="MANDATORY: Select region of interest "
"away from "
"sample",
button_style="success",
layout=widgets.Layout(width="100%"))
select_roi_widget_without_ob.on_click(self.select_roi_without_ob)
vbox_without_ob = widgets.VBox([hbox_without_ob,
spacer,
select_roi_widget_without_ob
])
self.accordion = widgets.Accordion(children=[vbox_with_ob, vbox_without_ob])
self.accordion.set_title(0, "With OB")
self.accordion.set_title(1, "Without OB")
display(self.accordion)
def select_roi_with_ob(self, status):
        nbr_data_to_use = int(self.nbr_images_slider_with_ob.value)
self.select_roi(nbr_data_to_use=nbr_data_to_use)
def select_roi_without_ob(self, status):
        nbr_data_to_use = int(self.nbr_images_slider_without_ob.value)
self.select_roi(nbr_data_to_use=nbr_data_to_use)
def select_roi(self, nbr_data_to_use=2):
self.o_interface = Interface(data=self.get_image_to_use_for_display(nbr_data_to_use=nbr_data_to_use),
instruction="Select region outside sample!",
next=self.after_selecting_roi)
self.o_interface.show()
def after_selecting_roi(self):
if self.accordion.selected_index == 0:
# with OB
self.select_roi_widget_with_ob.button_style = ""
elif self.accordion.selected_index == 1:
# without OB
self.select_roi_widget_without_ob.button_style = ""
def select_ob_folder(self, status=None):
select_data = ipywe.fileselector.FileSelectorPanel(instruction='Select OB Folder ...',
start_dir=self.starting_dir,
next=self.load_ob,
type='directory',
multiple=False)
select_data.show()
def load_ob(self, folder_selected):
self.load_files(data_type='ob', folder=folder_selected)
self.check_data_array_sizes()
if self.select_ob_widget:
self.select_ob_widget.button_style = ""
self.select_roi_widget_with_ob.button_style = "success"
def check_data_array_sizes(self):
len_ob = len(self.o_norm.data['ob']['file_name'])
len_sample = len(self.o_norm.data['sample']['file_name'])
if len_ob == len_sample:
display(HTML('<span style="font-size: 15px; color:green"> Sample and OB have the same size!</span>'))
return
if len_ob < len_sample:
self.o_norm.data['sample']['data'] = self.o_norm.data['sample']['data'][0:len_ob]
self.o_norm.data['sample']['file_name'] = self.o_norm.data['sample']['file_name'][0:len_ob]
display(HTML('<span style="font-size: 15px; color:green"> Truncated Sample array to match OB!</span>'))
else:
self.o_norm.data['ob']['data'] = self.o_norm.data['ob']['data'][0:len_sample]
self.o_norm.data['ob']['file_name'] = self.o_norm.data['ob']['file_name'][0:len_sample]
display(HTML('<span style="font-size: 15px; color:green"> Truncated OB array to match Sample!</span>'))
def load_time_spectra(self):
_tof_handler = TOF(filename=self.spectra_file)
_exp = Experiment(tof=_tof_handler.tof_array,
                          distance_source_detector_m=float(self.dSD_ui.value),
                          detector_offset_micros=float(self.detector_offset_ui.value))
nbr_sample = len(self.o_norm.data['sample']['file_name'])
self.lambda_array = _exp.lambda_array[0: nbr_sample] * 1e10 # to be in Angstroms
self.tof_array = _tof_handler.tof_array[0: nbr_sample]
def how_many_data_to_use_to_select_sample_roi(self):
nbr_images = len(self.o_norm.data['sample']['data'])
        init_value = int(nbr_images / 10)
if init_value == 0:
init_value = 1
box1 = widgets.HBox([widgets.Label("Nbr of images to use:",
layout=widgets.Layout(width='15')),
widgets.IntSlider(value=init_value,
max=nbr_images,
min=1)])
# layout=widgets.Layout(width='50%'))])
box2 = widgets.Label("(The more you select, the longer it will take to display the preview!)")
vbox = widgets.VBox([box1, box2])
display(vbox)
self.number_of_data_to_use_ui = box1.children[1]
def get_image_to_use_for_display(self, nbr_data_to_use=2):
_data = self.o_norm.data['sample']['data']
nbr_images = len(_data)
list_of_indexes_to_keep = random.sample(list(range(nbr_images)), nbr_data_to_use)
final_array = []
for _index in list_of_indexes_to_keep:
final_array.append(_data[_index])
final_image = np.mean(final_array, axis=0)
self.final_image = final_image
return final_image
def normalization(self):
if self.o_interface:
list_rois = self.o_interface.roi_selected
else:
list_rois = None
if self.accordion.selected_index == 0:
# with ob
self.normalization_with_ob(list_rois=list_rois)
elif self.accordion.selected_index == 1:
# without ob
self.normalization_without_ob(list_rois=list_rois)
self.export_normalized_data()
def normalization_without_ob(self, list_rois):
logging.info("Running normalization without OB")
if list_rois is None:
logging.info("-> no ROIs found! At least one ROI must be provided. Normalization Aborted!")
display(HTML('<span style="font-size: 15px; color:red"> You need to provide a ROI!</span>'))
return
else:
list_o_roi = []
for key in list_rois.keys():
roi = list_rois[key]
_x0 = roi['x0']
_y0 = roi['y0']
_x1 = roi['x1']
_y1 = roi['y1']
list_o_roi.append(ROI(x0=_x0,
y0=_y0,
x1=_x1,
y1=_y1))
logging.info(f"-> Normalization with {len(list_o_roi)} ROIs")
self.o_norm.normalization(roi=list_o_roi,
use_only_sample=True,
notebook=True,
force=True)
display(HTML('<span style="font-size: 15px; color:green"> Normalization DONE! </span>'))
logging.info(f"-> Done!")
def normalization_with_ob(self, list_rois):
logging.info("Running normalization with OB")
if list_rois is None:
logging.info("-> no roi used!")
self.o_norm.normalization(force=True)
else:
list_o_roi = []
for key in list_rois.keys():
roi = list_rois[key]
_x0 = roi['x0']
_y0 = roi['y0']
_x1 = roi['x1']
_y1 = roi['y1']
list_o_roi.append(ROI(x0=_x0,
y0=_y0,
x1=_x1,
y1=_y1))
logging.info(f"-> Normalization with {len(list_o_roi)} ROIs")
self.o_norm.normalization(roi=list_o_roi,
notebook=True,
force=True)
display(HTML('<span style="font-size: 15px; color:green"> Normalization DONE! </span>'))
logging.info(f"-> Done!")
def export_normalized_data(self):
self.o_folder = FileFolderBrowser(working_dir=self.working_dir,
next_function=self.export_normalized_data_step2,
ipts_folder=self.ipts_folder)
self.o_folder.select_output_folder_with_new(instruction="Select where to create the normalized data ...")
def export_normalized_data_step2(self, output_folder):
logging.info(f"export normalized data")
logging.info(f"-> output_folder: {output_folder}")
output_folder = os.path.abspath(output_folder)
self.o_folder.list_output_folders_ui.shortcut_buttons.close()
normalized_export_folder = str(Path(output_folder) / (self.data_folder_name + '_normalized'))
file_handler.make_or_reset_folder(normalized_export_folder)
self.o_norm.export(folder=normalized_export_folder)
display(HTML('<span style="font-size: 15px; color:green"> Created the normalized data in the folder ' +
normalized_export_folder + '</span>'))
if self.spectra_file:
logging.info(f"-> time spectra copied to output folder!")
file_handler.copy_files_to_folder(list_files=[self.spectra_file],
output_folder=normalized_export_folder)
display(HTML('<span style="font-size: 15px; color:green"> Copied time spectra file to same folder </span>'))
else:
logging.info(f"->No time spectra copied!")
def calculate_counts_vs_file_index_of_regions_selected(self, list_roi=None):
self.list_roi = list_roi
data = self.o_norm.get_sample_data()
nbr_data = len(data)
box_ui = widgets.HBox([widgets.Label("Calculate Counts vs lambda",
layout=widgets.Layout(width='20%')),
widgets.IntProgress(min=0,
max=nbr_data,
value=0,
layout=widgets.Layout(width='50%'))])
progress_bar = box_ui.children[1]
display(box_ui)
counts_vs_file_index = []
for _index, _data in enumerate(data):
if len(list_roi) == 0:
_array_data = _data
else:
_array_data = []
for _roi in list_roi.keys():
                    x0 = int(list_roi[_roi]['x0'])
                    y0 = int(list_roi[_roi]['y0'])
                    x1 = int(list_roi[_roi]['x1'])
                    y1 = int(list_roi[_roi]['y1'])
_array_data.append(np.nanmean(_data[y0:y1, x0:x1]))
counts_vs_file_index.append(np.nanmean(_array_data))
progress_bar.value = _index + 1
self.counts_vs_file_index = counts_vs_file_index
box_ui.close()
def plot(self):
trace = go.Scatter(
x=self.lambda_array,
y=self.counts_vs_file_index,
mode='markers')
layout = go.Layout(
height=500,
title="Average transmission vs TOF (of entire images, or of selected region if any)",
xaxis=dict(
title="Lambda (Angstroms)"
),
yaxis=dict(
title="Average Transmission"
),
)
data = [trace]
figure = go.Figure(data=data, layout=layout)
# powder bragg edges
bragg_edges = self.bragg_edges
hkl = self.hkl
max_x = 6
# format hkl labels
_hkl_formated = {}
for _material in hkl:
_hkl_string = []
for _hkl in hkl[_material]:
_hkl_s = ",".join(str(x) for x in _hkl)
_hkl_s = _material + "\n" + _hkl_s
_hkl_string.append(_hkl_s)
_hkl_formated[_material] = _hkl_string
for y_index, _material in enumerate(bragg_edges):
for _index, _value in enumerate(bragg_edges[_material]):
if _value > max_x:
continue
bragg_line = {"type": "line",
'x0' : _value,
'x1' : _value,
'yref': "paper",
'y0' : 0,
'y1' : 1,
'line': {
'color': 'rgb(255, 0, 0)',
'width': 1
}}
figure.add_shape(bragg_line)
# layout.shapes.append(bragg_line)
y_off = 1 - 0.25 * y_index
# add labels to plots
_annot = dict(
x=_value,
y=y_off,
text=_hkl_formated[_material][_index],
yref="paper",
font=dict(
family="Arial",
size=16,
color="rgb(150,50,50)"
),
showarrow=True,
arrowhead=3,
ax=0,
ay=-25)
figure.add_annotation(_annot)
iplot(figure)
def select_output_data_folder(self):
o_folder = FileFolderBrowser(working_dir=self.working_dir,
next_function=self.export_data)
o_folder.select_output_folder(instruction="Select where to create the ascii file...")
def make_output_file_name(self, output_folder='', input_folder=''):
file_name = os.path.basename(input_folder) + "_counts_vs_lambda_tof.txt"
return os.path.join(os.path.abspath(output_folder), file_name)
def export_data(self, output_folder):
input_folder = os.path.dirname(self.o_norm.data['sample']['file_name'][0])
output_file_name = self.make_output_file_name(output_folder=output_folder,
input_folder=input_folder)
lambda_array = self.lambda_array
counts_vs_file_index = self.counts_vs_file_index
tof_array = self.tof_array
metadata = ["# input folder: {}".format(input_folder)]
list_roi = self.list_roi
if len(list_roi) == 0:
metadata.append("# Entire sample selected")
else:
for index, key in enumerate(list_roi.keys()):
roi = list_roi[key]
_x0 = roi['x0']
_y0 = roi['y0']
_x1 = roi['x1']
_y1 = roi['y1']
metadata.append("# ROI {}: x0={}, y0={}, x1={}, y1={}".format(index,
_x0,
_y0,
_x1,
_y1))
metadata.append("#")
metadata.append("# tof (micros), lambda (Angstroms), Average transmission")
data = []
for _t, _l, _c in zip(tof_array, lambda_array, counts_vs_file_index):
data.append("{}, {}, {}".format(_t, _l, _c))
file_handler.make_ascii_file(metadata=metadata,
data=data,
output_file_name=output_file_name,
dim='1d')
if os.path.exists(output_file_name):
display(HTML('<span style="font-size: 20px; color:blue">Ascii file ' + output_file_name + ' has been ' +
'created </span>'))
else:
display(HTML('<span style="font-size: 20px; color:red">Error exporting Ascii file ' + output_file_name +
'</span>'))
def select_output_table_folder(self):
o_folder = FileFolderBrowser(working_dir=self.working_dir,
next_function=self.export_table)
o_folder.select_output_folder()
def export_table(self, output_folder):
material = self.handler.material[0]
lattice = self.handler.lattice[material]
crystal_structure = self.handler.crystal_structure[material]
metadata = ["# material: {}".format(material),
"# crystal structure: {}".format(crystal_structure),
"# lattice: {} Angstroms".format(lattice),
"#",
"# hkl, d(angstroms), BraggEdge"]
data = []
bragg_edges = self.bragg_edges[material]
hkl = self.hkl[material]
for _index in np.arange(len(bragg_edges)):
_hkl_str = [str(i) for i in hkl[_index]]
_hkl = "".join(_hkl_str)
_bragg_edges = np.float(bragg_edges[_index])
_d = _bragg_edges / 2.
_row = "{}, {}, {}".format(_hkl, _d, _bragg_edges)
data.append(_row)
output_file_name = os.path.join(output_folder, 'bragg_edges_of_{}.txt'.format(material))
file_handler.make_ascii_file(metadata=metadata,
data=data,
dim='1d',
output_file_name=output_file_name)
display(HTML('<span style="font-size: 20px; color:blue">File created : ' + \
output_file_name + '</span>'))
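# Editorial note (added; not part of the original notebook code): export_table
# above writes d = BraggEdge / 2 because, in a transmission (Bragg-edge)
# measurement, the edge of a given hkl family appears at lambda_hkl = 2 * d_hkl,
# so halving the tabulated edge wavelength recovers the d-spacing in Angstroms.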
def select_folder(self, message="", next_function=None):
folder_widget = ipywe.fileselector.FileSelectorPanel(instruction='select {} folder'.format(message),
start_dir=self.working_dir,
next=next_function,
type='directory',
multiple=False)
folder_widget.show()
| 44.586441
| 120
| 0.527522
|
a9c8fafc8b75bd9e1dc3131811a4cc8342104c23
| 7,072
|
py
|
Python
|
django_gui/test_django_server_api.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
django_gui/test_django_server_api.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
django_gui/test_django_server_api.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
###
###
"""Note: the Django tests don't run properly when run via normal unittesting, so we need to run them via "./manage.py test". Disabled until we figure out how to force it to use the test database."""
import logging
import os
import sys
import unittest
import warnings
from django.test import TestCase
from os.path import dirname, realpath; sys.path.append(dirname(dirname(realpath(__file__))))
from django_gui.django_server_api import DjangoServerAPI
sample_test_0 = {
"cruise": {
"id": "test_0",
"start": "2017-01-01",
"end": "2017-02-01"
},
"loggers": {
"knud": {
"configs": ["off", "knud->net", "knud->net/file"]
},
"gyr1": {
"configs": ["off", "gyr1->net", "gyr1->net/file"]
},
"mwx1": {
"configs": ["off", "mwx1->net", "mwx1->net/file"]
},
"s330": {
"configs": ["off", "s330->net", "s330->net/file"]
}
},
"modes": {
"off": {
"knud": "off",
"gyr1": "off",
"mwx1": "off",
"s330": "off"
},
"port": {
"knud": "off",
"gyr1": "gyr1->net",
"mwx1": "mwx1->net",
"s330": "off"
},
"underway": {
"knud": "knud->net/file",
"gyr1": "gyr1->net/file",
"mwx1": "mwx1->net/file",
"s330": "s330->net/file"
}
},
"default_mode": "off",
"configs": {
"off": {},
"knud->net": {"knud":"config knud->net"},
"gyr1->net": {"gyr1":"config gyr1->net"},
"mwx1->net": {"mwx1":"config mwx1->net"},
"s330->net": {"s330":"config s330->net"},
"knud->net/file": {"knud":"config knud->net/file"},
"gyr1->net/file": {"gyr1":"config gyr1->net/file"},
"mwx1->net/file": {"mwx1":"config mwx1->net/file"},
"s330->net/file": {"s330":"config s330->net/file"}
}
}
sample_test_1 = {
"cruise": {
"id": "test_1",
"start": "2017-01-01",
"end": "2017-02-01"
},
"loggers": {
"knud": {
"configs": ["off", "knud->net", "knud->net/file"]
},
"gyr1": {
"configs": ["off", "gyr1->net", "gyr1->net/file"]
},
"mwx1": {
"configs": ["off", "mwx1->net", "mwx1->net/file"]
},
"s330": {
"configs": ["off", "s330->net", "s330->net/file"]
}
},
"modes": {
"off": {
"knud": "off",
"gyr1": "off",
"mwx1": "off",
"s330": "off"
},
"port": {
"knud": "off",
"gyr1": "gyr1->net",
"mwx1": "mwx1->net",
"s330": "off"
},
"underway": {
"knud": "knud->net/file",
"gyr1": "gyr1->net/file",
"mwx1": "mwx1->net/file",
"s330": "s330->net/file"
}
},
"default_mode": "off",
"configs": {
"off": {},
"knud->net": {"knud":"config knud->net"},
"gyr1->net": {"gyr1":"config gyr1->net"},
"mwx1->net": {"mwx1":"config mwx1->net"},
"s330->net": {"s330":"config s330->net"},
"knud->net/file": {"knud":"config knud->net/file"},
"gyr1->net/file": {"gyr1":"config gyr1->net/file"},
"mwx1->net/file": {"mwx1":"config mwx1->net/file"},
"s330->net/file": {"s330":"config s330->net/file"}
}
}
################################################################################
class TestDjangoServerAPI(TestCase):
############################
@unittest.skipUnless('test' in sys.argv, 'test_django_server_api.py must be run by running "./manage.py test gui"')
def test_basic(self):
api = DjangoServerAPI()
try:
api.delete_configuration()
except ValueError:
pass
try:
api.delete_configuration()
except ValueError:
pass
api.load_configuration(sample_test_0)
self.assertEqual(api.get_modes(), ['off', 'port', 'underway'])
self.assertEqual(api.get_active_mode(), 'off')
self.assertDictEqual(api.get_logger_configs(),
{'knud': {'name': 'off'},
'gyr1': {'name': 'off'},
'mwx1': {'name': 'off'},
's330': {'name': 'off'}
})
with self.assertRaises(ValueError):
api.set_active_mode('invalid mode')
api.set_active_mode('underway')
self.assertEqual(api.get_active_mode(), 'underway')
self.assertDictEqual(api.get_logger_configs(),
{'knud': {'knud':'config knud->net/file',
'name': 'knud->net/file'},
'gyr1': {'gyr1':'config gyr1->net/file',
'name': 'gyr1->net/file'},
'mwx1': {'mwx1':'config mwx1->net/file',
'name': 'mwx1->net/file'},
's330': {'s330':'config s330->net/file',
'name': 's330->net/file'}})
with self.assertRaises(ValueError):
api.get_logger_configs('invalid_mode')
api.load_configuration(sample_test_1)
self.assertEqual(api.get_logger_configs('port'),
{'gyr1': {'gyr1':'config gyr1->net',
'name': 'gyr1->net'},
'knud': {'name': 'off'},
'mwx1': {'mwx1':'config mwx1->net',
'name': 'mwx1->net'},
's330': {'name': 'off'}
})
self.assertDictEqual(api.get_loggers(),
{'knud': {
'configs': ['off', 'knud->net', 'knud->net/file'],
'active': 'off'
},
'gyr1': {
'configs': ['off', 'gyr1->net', 'gyr1->net/file'],
'active': 'off'
},
'mwx1': {
'configs': ['off', 'mwx1->net', 'mwx1->net/file'],
'active': 'off'
},
's330': {
'configs': ['off', 's330->net', 's330->net/file'],
'active': 'off'}
})
api.delete_configuration()
with self.assertRaises(ValueError):
api.get_configuration()
self.assertDictEqual(api.get_logger_configs(), {})
################################################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbosity', dest='verbosity',
default=0, action='count',
help='Increase output verbosity')
args = parser.parse_args()
LOGGING_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
LOG_LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
args.verbosity = min(args.verbosity, max(LOG_LEVELS))
logging.getLogger().setLevel(LOG_LEVELS[args.verbosity])
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(warnings='ignore')
from django.core.management import execute_from_command_line
execute_from_command_line(['dummy', 'test', 'gui.test_django_server_api'])
| 31.571429
| 198
| 0.479214
|
c281f26ac1aa92589e736b11431716a3d6b3f1ad
| 36,775
|
py
|
Python
|
femagtools/ts.py
|
dapu/femagtools
|
95eaf750adc2013232cdf482e523b3900ac6eb08
|
[
"BSD-2-Clause"
] | null | null | null |
femagtools/ts.py
|
dapu/femagtools
|
95eaf750adc2013232cdf482e523b3900ac6eb08
|
[
"BSD-2-Clause"
] | null | null | null |
femagtools/ts.py
|
dapu/femagtools
|
95eaf750adc2013232cdf482e523b3900ac6eb08
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Classes for post processing based on vtu-files of created by FEMAG-TS
"""
__author__ = 'werner b. vetter, ronald tanner'
import femagtools.nc
import femagtools.vtu as vtu
import numpy as np
import scipy.integrate as integrate
import warnings
def losscoeff_frequency_to_time(B0, f0, c, exp):
'''Convert a Bertotti coefficient from the frequency domain to the corresponding time-domain coefficient
Parameters
----------
B0 : float
Base flux density [T]
f0 : float
Base frequency [Hz]
c : float
Bertotti-coefficient
exp : float
Bertotti-exponent
Returns
-------
k : float
Loss coefficient in the time domain
The conversion is only possible for loss coefficients with
equal exponents for flux density and frequency,
e.g. eddy current losses (cw*(B/B0)**2*(f/f0)**2) or
anomalous losses (ce*(B/B0)**1.5*(f/f0)**1.5)
'''
y, abserr = integrate.quad(lambda x: np.abs(np.cos(2*np.pi*x))**exp, 0, 1)
return c/(B0**exp*f0**exp)/((2*np.pi)**exp * y)
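# Hedged usage sketch (added; not part of the original module). The numbers are
# purely illustrative assumptions showing a typical call for an eddy-current
# coefficient with exponent 2:
#
#   kw = losscoeff_frequency_to_time(B0=1.5, f0=50.0, c=1.5, exp=2.0)
#
# For exp = 2 the integral of |cos(2*pi*x)|**2 over one period equals 0.5, so
# the expression above reduces to kw = c / (B0**2 * f0**2) / ((2*pi)**2 * 0.5).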
class TimeRange(object):
def __init__(self, vtu_data, nc_model):
'''Read time vector in and generate an equidistant vector if necessary.
Also the base frequency is determined.
Parameters
----------
vtu_data : object
vtu reader
nc_model: object
'''
try: # FEMAG-TS files
data_list = ['time [s]']
vtu_data.read_data(data_list)
self.vector = vtu_data.get_data_vector('time [s]')
self.freq = 1/(self.vector[-1]-self.vector[0] +
(self.vector[1]-self.vector[0])/2 +
(self.vector[-1]-self.vector[-2])/2)
dt = self.vector[1]-self.vector[0]
dt_min = 1e32
self.equidistant = True
for i in range(len(self.vector)-2):
dti = self.vector[i+1]-self.vector[i]
if dt < 0.999*dti or dt > 1.001*dti:
self.equidistant = False
if dti < dt_min:
dt_min = dti
if not self.equidistant:
numpnt = int((self.vector[-1]-self.vector[0])/dt_min)
self.vector_equi = np.linspace(self.vector[0],
self.vector[-1],
num=numpnt)
except: # FEMAG-DC files
speed = nc_model.speed
self.freq = speed/60*nc_model.pole_pairs
self.equidistant = True
class Losses(object):
def __init__(self, modelname, dirname):
'''Loss calculation for FEMAG-TS simulations
Parameters
----------
modelname : str
Name of the model (nc-file)
dirname : str
Directory that contains the vtu result files
'''
self.vtu_data = vtu.read(dirname)
self.nc_model = femagtools.nc.read(modelname)
# Read iron losses coefficients
self.iron_loss_coefficients = self.nc_model.iron_loss_coefficients
for c in self.iron_loss_coefficients:
if c['cw_freq_exp'] == c['cw_ind_exp']:
c['kw'] = losscoeff_frequency_to_time(
c['base_induction'],
c['base_frequency'],
c['cw'], c['cw_freq_exp'])
else:
warnings.warn(
'Waterfall method not possible, specify parameter kw')
c['kw'] = 0.0
def ohm_lossenergy_sr(self, sr):
'''Ohmic loss energy of a subregion
Parameters
----------
sr : object
Subregion
Returns
-------
lossenergy : float
Ohmic loss energy of the subregion
The loss energy is determined by adding up the loss energy of the
individual elements.
'''
scale_factor = self.nc_model.scale_factor()
length = self.nc_model.arm_length
time = self.time_vector
srlossenergy = 0.0
for supel in sr.superelements:
selossenergy = 0.0
if supel.conduc > 0.0:
ff = supel.fillfactor
if ff == 0.0:
ff = 1.0
#print(supel.key, supel.conduc, supel.length, ff)
for el in supel.elements:
#print(el.key, el.area)
ellossenergy = 0.0
cd_vec = self.vtu_data.get_data_vector('curd', el.key)
for j in range(len(time)-1):
cd = (cd_vec[j]+cd_vec[j+1])/2
dt = time[j+1]-time[j]
ellossenergy = ellossenergy + dt*cd**2*el.area/ff/supel.conduc*supel.length
selossenergy = selossenergy + ellossenergy*length*scale_factor
srlossenergy = srlossenergy + selossenergy
return srlossenergy
def ohm_lossenergy_subregion(self, srname, start=0.0, end=0.0):
'''Ohmic loss energy of a subregion
Parameters
----------
srname: str
Name of subregion
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
lossenergy : float
Ohmic loss energy of the subregion
The loss energy is determined by adding up the loss energy of the
individual elements over the time window.
If start and end are not specified, the time window of the
previous calculation is used.
'''
data_list = ['time [s]', 'curd']
self.vtu_data.read_data(data_list)
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
self.time_vector = self.vtu_data.get_data_vector('time [s]')
sr = self.nc_model.get_subregion(srname)
srlossenergy = self.ohm_lossenergy_sr(sr)
return srlossenergy
def ohm_powerlosses_subregion(self, srname, start=0.0, end=0.0):
'''Ohmic loss dissipation of a subregion within the time window
Parameters
----------
srname : str
Name of subregion
start : float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
powerlosses : float
Ohmic loss dissipation of the subregion
The loss energy is determined by adding up the loss energy of the
individual elements over the time window.
The loss energy is divided by the time window length
to obtain the averaged power loss
If start and end are not specified, the time window of the
previous calculation is used.
'''
while len(srname) < 4:
srname = srname+' '
srlossenergy = self.ohm_lossenergy_subregion(srname, start, end)
srpowerlosses = srlossenergy/(self.time_vector[-1]-self.time_vector[0])
return srpowerlosses
def ohm_lossenergy(self, start=0.0, end=0.0):
'''Ohmic loss energy of all subregions
Parameters
----------
start: float
Start of the time window (optional)
end: float
End of the time window (optional)
Returns
-------
loss_data: list of dict
Dictionary per subregion with its ohmic loss energy
The loss energy is determined by adding up the loss energy of the
individual elements over the time window.
If start and end are not specified, the time window of the
previous calculation is used.
'''
data_list = ['time [s]', 'curd']
self.vtu_data.read_data(data_list)
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
self.time_vector = self.vtu_data.get_data_vector('time [s]')
loss_data = []
for sr in self.nc_model.subregions:
srlossenergy = self.ohm_lossenergy_sr(sr)
srname = sr.name
if sr.wb_key >= 0:
#print(sr.key,"is winding",sr.wb_key+1)
if srname == ' ':
srname = "wdg "+str(sr.wb_key+1)
loss_data.append(
{'key': sr.key, 'name': srname, 'losses': srlossenergy})
return loss_data
def ohm_powerlosses(self, start=0.0, end=0.0):
'''Ohmic loss dissipation of all subregions
Parameters
----------
start : float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
loss_data : list of dict
Dictionary per subregion with its ohmic loss dissipation
The loss energy is determined by adding up the loss energy of the
individual elements over the time window.
The loss energy is divided by the time window length
to obtain the averaged power loss
If start and end are not specified, the time window of the
previous calculation is used.
'''
data_list = ['time [s]', 'curd']
self.vtu_data.read_data(data_list)
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
self.time_vector = self.vtu_data.get_data_vector('time [s]')
loss_data = []
for sr in self.nc_model.subregions:
srlossenergy = self.ohm_lossenergy_sr(sr)
srpowerlosses = srlossenergy / \
(self.time_vector[-1]-self.time_vector[0])
srname = sr.name
if sr.wb_key >= 0:
#print(sr.key,"is winding",sr.wb_key+1)
if srname == ' ':
srname = "wdg "+str(sr.wb_key+1)
loss_data.append(
{'key': sr.key, 'name': srname, 'losses': srpowerlosses})
return loss_data
def ohm_powerlosses_fft_sr(self, sr):
'''Power dissipation of a subregion
Parameters
----------
sr : object
Subregion
Returns
-------
powerlosses : float
Ohmic power losses of the subregion
An FFT of the current density is computed.
The power losses of each harmonic are determined and added up.
'''
scale_factor = self.nc_model.scale_factor()
length = self.nc_model.arm_length
srpowerlosses = 0.0
for supel in sr.superelements:
sepowerlosses = 0.0
if supel.conduc > 0.0:
ff = supel.fillfactor
if ff == 0.0:
ff = 1.0
#print(supel.key, supel.conduc, supel.length, ff)
for el in supel.elements:
#print(el.key, el.area)
elpowerlosses = 0.0
cd_vec_0 = self.vtu_data.get_data_vector('curd', el.key)
if not self.times.equidistant:
cd_vec = np.interp(self.times.vector_equi,
self.times.vector, cd_vec_0,
period=1.0/self.times.freq)
# f = interpolate.interp1d(self.times.vector, cd_vec_0, kind="cubic")
# cd_vec = f(self.times.vector_equi)
else:
cd_vec = cd_vec_0
cd_spec = abs(np.fft.fft(cd_vec))/(len(cd_vec)/2)
for j in range(int(len(cd_vec)/2)):
elpowerlosses = elpowerlosses + \
cd_spec[j]**2/2*el.area/ff / \
supel.conduc*supel.length
sepowerlosses = sepowerlosses + elpowerlosses*length*scale_factor
srpowerlosses = srpowerlosses + sepowerlosses
return srpowerlosses
def ohm_powerlosses_fft_subregion(self, srname, start=0.0, end=0.0):
'''Power dissipation of a subregion
Parameters
----------
srname: str
Name of subregion
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
powerlosses : float
Power dissipation of the subregion
An FFT of the current density is computed.
The power losses of each harmonic are determined and added up.
The time window has to be one period or an integer multiple of it.
If start and end are not specified, the time window of the
previous calculation is used.
'''
data_list = ['time [s]', 'curd']
self.vtu_data.read_data(data_list)
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
self.times = TimeRange(self.vtu_data, self.nc_model)
sr = self.nc_model.get_subregion(srname)
srpowerlosses = self.ohm_powerlosses_fft_sr(sr)
return srpowerlosses
def ohm_powerlosses_fft(self, start=0.0, end=0.0):
'''Power dissipation of all subregions
Parameters
----------
start : float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
loss_data : list of dict
Dictionary per subregion with its power dissipation
An FFT of the current density is computed.
The power losses of each harmonic are determined and added up.
The time window has to be one period or an integer multiple of it.
If start and end are not specified, the time window of the
previous calculation is used.
'''
data_list = ['time [s]', 'curd']
self.vtu_data.read_data(data_list)
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
self.times = TimeRange(self.vtu_data, self.nc_model)
loss_data = []
for sr in self.nc_model.subregions:
srpowerlosses = self.ohm_powerlosses_fft_sr(sr)
srname = sr.name
if sr.wb_key >= 0:
#print(sr.key,"is winding",sr.wb_key+1)
if srname == ' ':
srname = "wdg "+str(sr.wb_key+1)
loss_data.append(
{'key': sr.key, 'name': srname, 'losses': srpowerlosses})
return loss_data
# iron losses
def iron_losses_fft_se(self, se):
'''Iron losses of a superelement
Parameters
----------
se: object
Superelement
Returns
-------
ironlosses : dict
Iron losses of the superelement
An FFT of the flux density is computed.
The iron losses of each harmonic are determined by the
Bertotti formula
Physt = ch * (f/f0)**hfe * (B/B0)**hBe * V * rho
Peddy = cw * (f/f0)**wfe * (B/B0)**wBe * V * rho
Pexce = ce * (f/f0)**efe * (B/B0)**eBe * V * rho
and added to the total losses of the superelement
Ptot = (Physt + Peddy + Pexce) * shape_factor
'''
scale_factor = self.nc_model.scale_factor()
length = self.nc_model.arm_length
freq = self.times.freq
sehystlosses = 0.0
seeddylosses = 0.0
seexcelosses = 0.0
if se.elements[0].reluc[0] < 1.0 or se.elements[0].reluc[1] < 1.0:
center_pnt = se.elements[0].center
if (np.sqrt(center_pnt[0]**2+center_pnt[1]**2) > self.nc_model.FC_RADIUS):
ldi = len(self.iron_loss_coefficients)-2 # outside
else:
ldi = len(self.iron_loss_coefficients)-1 # inside
sf = self.iron_loss_coefficients[ldi]['shapefactor']
if (se.mcvtype > 0):
ldi = se.mcvtype-1
bf = self.iron_loss_coefficients[ldi]['base_frequency']
bb = self.iron_loss_coefficients[ldi]['base_induction']
ch = self.iron_loss_coefficients[ldi]['ch']
chfe = self.iron_loss_coefficients[ldi]['ch_freq_exp']
chbe = self.iron_loss_coefficients[ldi]['ch_ind_exp']
cw = self.iron_loss_coefficients[ldi]['cw']
cwfe = self.iron_loss_coefficients[ldi]['cw_freq_exp']
cwbe = self.iron_loss_coefficients[ldi]['cw_ind_exp']
ce = self.iron_loss_coefficients[ldi]['ce']
cefe = self.iron_loss_coefficients[ldi]['ce_freq_exp']
cebe = self.iron_loss_coefficients[ldi]['ce_ind_exp']
sw = self.iron_loss_coefficients[ldi]['spec_weight']*1000
ff = self.iron_loss_coefficients[ldi]['fillfactor']
for el in se.elements:
#print(el.key, el.area)
elhystlosses = 0.0
eleddylosses = 0.0
elexcelosses = 0.0
bx_vec_0 = self.vtu_data.get_data_vector('b', el.key)[0]
if not self.times.equidistant:
bx_vec = np.interp(self.times.vector_equi,
self.times.vector, bx_vec_0,
period=1.0/self.times.freq)
# f = interpolate.interp1d(self.times.vector, bx_vec_0, kind="cubic")
# bx_vec = f(self.times.vector_equi)
else:
bx_vec = bx_vec_0
bx_spec = abs(np.fft.fft(bx_vec))/(len(bx_vec)/2)
by_vec_0 = self.vtu_data.get_data_vector('b', el.key)[1]
if not self.times.equidistant:
by_vec = np.interp(self.times.vector_equi,
self.times.vector, by_vec_0,
period=1.0/self.times.freq)
# f = interpolate.interp1d(self.times.vector, by_vec_0, kind="cubic")
# by_vec = f(self.times.vector_equi)
else:
by_vec = by_vec_0
by_spec = abs(np.fft.fft(by_vec))/(len(by_vec)/2)
b_spec = np.sqrt((bx_spec**2+by_spec**2))
for j in range(int(len(b_spec)/2)):
elhystlosses = elhystlosses + ch * \
(j*freq/bf)**chfe*(b_spec[j]/bb)**chbe
eleddylosses = eleddylosses + cw * \
(j*freq/bf)**cwfe*(b_spec[j]/bb)**cwbe
elexcelosses = elexcelosses + ce * \
(j*freq/bf)**cefe*(b_spec[j]/bb)**cebe
sehystlosses = sehystlosses + elhystlosses*el.area*length*ff*sf*sw*scale_factor
seeddylosses = seeddylosses + eleddylosses*el.area*length*ff*sf*sw*scale_factor
seexcelosses = seexcelosses + elexcelosses*el.area*length*ff*sf*sw*scale_factor
setotallosses = sehystlosses + seeddylosses + seexcelosses
return {'total': setotallosses,
'hysteresis': sehystlosses,
'eddycurrent': seeddylosses,
'excess': seexcelosses}
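# Hedged illustration (added; not in the original source): for one element the
# per-harmonic sum in iron_losses_fft_se above amounts to, taking the
# eddy-current term as an example,
#
#   P_eddy = sum_j cw * (j*freq/bf)**cwfe * (B_j/bb)**cwbe
#
# where B_j is the FFT amplitude of the flux density at harmonic j and bf, bb,
# cw, cwfe, cwbe are the material's loss coefficients; the result is then
# scaled by element area, core length, fill factor, shape factor, specific
# weight and the model scale factor, exactly as in the loop above.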
def iron_losses_fft_subregion(self, srname, start=0.0, end=0.0):
'''Iron losses of a subregion
Parameters
----------
srname: str
Name of subregion
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
losses : dict
Iron losses of the subregion
The iron losses are calculated based on the Bertotti formula
(see also iron_losses_fft_se)
'''
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
data_list = ['b']
self.vtu_data.read_data(data_list)
self.times = TimeRange(self.vtu_data, self.nc_model)
srtotallosses = 0.0
srhystlosses = 0.0
sreddylosses = 0.0
srexcelosses = 0.0
sr = self.nc_model.get_subregion(srname)
for se in sr.superelements:
selosses = self.iron_losses_fft_se(se)
srtotallosses = srtotallosses + selosses['total']
srhystlosses = srhystlosses + selosses['hysteresis']
sreddylosses = sreddylosses + selosses['eddycurrent']
srexcelosses = srexcelosses + selosses['excess']
srlosses = {'subregion': srname,
'total': srtotallosses,
'hysteresis': srhystlosses,
'eddycurrent': sreddylosses,
'excess': srexcelosses
}
return srlosses
def iron_losses_fft(self, start=0.0, end=0.0):
'''Iron losses of all subregions and superelements
Parameters
----------
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
losses : list of dict
Iron losses per subregion
The iron losses are calculated based on the Bertotti formula
(see also iron_losses_fft_se)
'''
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
data_list = ['b']
self.vtu_data.read_data(data_list)
self.times = TimeRange(self.vtu_data, self.nc_model)
losseslist = []
for se in self.nc_model.superelements:
selosses = self.iron_losses_fft_se(se)
if se.subregion:
for sr in self.nc_model.subregions:
if se in sr.superelements:
srname = sr.name
#print(se.key, "in", sr.key, sr.name)
else:
if (se.mcvtype == 0):
center_pnt = se.elements[0].center
if (np.sqrt(center_pnt[0]**2+center_pnt[1]**2) > self.nc_model.FC_RADIUS):
srname = "no, outside"
else:
srname = "no, inside"
found = False
for srlosses in losseslist:
if srlosses['subregion'] == srname:
srlosses['total'] = srlosses['total']+selosses['total']
srlosses['hysteresis'] = srlosses['hysteresis'] + \
selosses['hysteresis']
srlosses['eddycurrent'] = srlosses['eddycurrent'] + \
selosses['eddycurrent']
srlosses['excess'] = srlosses['excess']+selosses['excess']
found = True
if not found:
if selosses['total'] > 0.0:
srlosses = {'subregion': srname,
'total': selosses['total'],
'hysteresis': selosses['hysteresis'],
'eddycurrent': selosses['eddycurrent'],
'excess': selosses['excess']
}
losseslist.append(srlosses)
return losseslist
def iron_lossenergy_time_se(self, se):
'''Iron losses of a superelement in time domain
Parameters
----------
se: object
Superelement
Returns
-------
lossenergies : dict
Iron loss energy of the superelement
The iron losses are calculated based on the Bertotti formula
in the time domain.
The loss coefficients of the frequency domain are converted into
time-domain coefficients.
For the hysteresis losses a waterfall method is implemented.
Eddy current losses and anomalous losses are calculated by
adding up the losses of each time step.
'''
scale_factor = self.nc_model.scale_factor()
length = self.nc_model.arm_length
time = self.times.vector
sehystenergy = 0.0
seeddyenergy = 0.0
seexceenergy = 0.0
if se.elements[0].reluc[0] < 1.0 or se.elements[0].reluc[1] < 1.0:
if (se.mcvtype == 0):
center_pnt = se.elements[0].center
if (np.sqrt(center_pnt[0]**2+center_pnt[1]**2) > self.nc_model.FC_RADIUS):
ldi = len(self.iron_loss_coefficients)-2 # outside
else:
ldi = len(self.iron_loss_coefficients)-1 # inside
else:
ldi = se.mcvtype-1
kh = self.iron_loss_coefficients[ldi]['kh']
chbe = self.iron_loss_coefficients[ldi]['ch_ind_exp']
khml = self.iron_loss_coefficients[ldi]['khml']
kw = self.iron_loss_coefficients[ldi]['kw']
cwbe = self.iron_loss_coefficients[ldi]['cw_ind_exp']
ke = self.iron_loss_coefficients[ldi]['ke']
cebe = self.iron_loss_coefficients[ldi]['ce_ind_exp']
sw = self.iron_loss_coefficients[ldi]['spec_weight']*1000
ff = self.iron_loss_coefficients[ldi]['fillfactor']
sf = self.iron_loss_coefficients[ldi]['shapefactor']
for el in se.elements:
elhystenergy = 0.0
eleddyenergy = 0.0
elexceenergy = 0.0
bx_vec = self.vtu_data.get_data_vector('b', el.key)[0]
by_vec = self.vtu_data.get_data_vector('b', el.key)[1]
# Peak value and direction of the main field
Bpeak = np.sqrt(bx_vec[0]**2+by_vec[0]**2)
phi = np.arctan2(by_vec[0], bx_vec[0])
for i in range(1, len(time)):
b1 = np.sqrt(bx_vec[i-1]**2+by_vec[i-1]**2)
b2 = np.sqrt(bx_vec[i]**2+by_vec[i]**2)
if abs(b2) > Bpeak:
Bpeak = abs(b2)
phi = np.arctan2(by_vec[i], bx_vec[i])
# Transformation into the main field direction
br_vec = []
bt_vec = []
for i in range(len(time)):
br_vec.append(np.cos(phi)*bx_vec[i]+np.sin(phi)*by_vec[i])
bt_vec.append(np.sin(phi)*bx_vec[i]-np.cos(phi)*by_vec[i])
Bpeak_p = np.sqrt(bx_vec[0]**2+by_vec[0]**2)
Bx = []
tp_beg = 0.0
tp_end = 0.0
Tp = 0.0
nzeros = 0
zero = (br_vec[0] >= 0)
if br_vec[1] > br_vec[0]:
up = True
else:
up = False
for i in range(1, len(time)):
b1 = np.sqrt(br_vec[i-1]**2+bt_vec[i-1]**2)
b2 = np.sqrt(br_vec[i]**2+bt_vec[i]**2)
# Peak value within the last period
if abs(b2) > Bpeak_p:
Bpeak_p = abs(b2)
# Zero crossings and period duration
if zero != (br_vec[i] >= 0):
zero = (not zero)
tp_beg = tp_end
tp_end = time[i]
if tp_beg > 0.0:
nzeros = nzeros+1
if nzeros > 1:
#Tp = (Tp*(nzeros-1)/nzeros+2*(tp_end-tp_beg)/nzeros)/2
Tp = 2*(tp_end-tp_beg)
Bpeak = Bpeak_p
elhystenergy = elhystenergy+kh*Bpeak**chbe/2
Bpeak_p = 0.0
else:
Tp = 2.0*(tp_end-tp_beg)
Bpeak = Bpeak_p
elhystenergy = elhystenergy+kh * \
Bpeak**chbe * (tp_end-time[0])/Tp
Bpeak_p = 0.0
Bx = []
# Turning points of the flux density
if up and b2 < b1:
Bx.append(b1)
if not up and b2 > b1:
Bx.append(b1)
# Direction of the slope
if b2 > b1:
up = True
else:
up = False
try:
if b2 > 0 and up and b2 > Bx[-2]:
Bm = abs(Bx[-2]+Bx[-1])/2
dB = abs(Bx[-2]-Bx[-1])
elhystenergy = elhystenergy + \
kh*Bm**(chbe-1)*khml*dB/2
Bx.remove(Bx[-2])
Bx.remove(Bx[-1])
if b2 < 0 and not up and b2 < Bx[-2]:
elhystenergy = elhystenergy + \
kh*Bm**(chbe-1)*khml*dB/2
Bx.remove(Bx[-2])
Bx.remove(Bx[-1])
if b2 > 0 and not up and Bx[-1] > Bx[-2]:
elhystenergy = elhystenergy + \
kh*Bm**(chbe-1)*khml*dB/2
Bx.remove(Bx[-2])
Bx.remove(Bx[-1])
if b2 < 0 and up and Bx[-1] < Bx[-2]:
elhystenergy = elhystenergy + \
kh*Bm**(chbe-1)*khml*dB/2
Bx.remove(Bx[-2])
Bx.remove(Bx[-1])
except:
pass
dt = time[i]-time[i-1]
dbr = br_vec[i]-br_vec[i-1]
dbt = bt_vec[i]-bt_vec[i-1]
db = np.sqrt(dbr**2+dbt**2)
eleddyenergy = eleddyenergy + kw*(db/dt)**cwbe * dt
elexceenergy = elexceenergy + ke*(db/dt)**cebe * dt
#elhystenergy = elhystenergy+kh*Bpeak**chbe * T/(time[-1]-time[0])
if nzeros >= 1:
elhystenergy = elhystenergy+kh * \
Bpeak**chbe * (time[-1]-tp_end)/Tp
sehystenergy = sehystenergy + elhystenergy*el.area*length*ff*sf*sw*scale_factor
seeddyenergy = seeddyenergy + eleddyenergy*el.area*length*ff*sf*sw*scale_factor
seexceenergy = seexceenergy + elexceenergy*el.area*length*ff*sf*sw*scale_factor
setotalenergy = sehystenergy + seeddyenergy + seexceenergy
return {'total': setotalenergy,
'hysteresis': sehystenergy,
'eddycurrent': seeddyenergy,
'excess': seexceenergy}
def iron_lossenergy_time_subregion(self, srname, start=0.0, end=0.0):
'''Iron loss energy of a subregion
Parameters
----------
srname: str
Name of subregion
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
losses : dict
Iron loss energy of the subregion
The iron losses are calculated based on the Bertotti formula
in the time domain (see also iron_lossenergy_time_se)
'''
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
data_list = ['b']
self.vtu_data.read_data(data_list)
self.times = TimeRange(self.vtu_data, self.nc_model)
srtotalenergy = 0.0
srhystenergy = 0.0
sreddyenergy = 0.0
srexceenergy = 0.0
sr = self.nc_model.get_subregion(srname)
for se in sr.superelements:
seenergy = self.iron_lossenergy_time_se(se)
srtotalenergy = srtotalenergy + seenergy['total']
srhystenergy = srhystenergy + seenergy['hysteresis']
sreddyenergy = sreddyenergy + seenergy['eddycurrent']
srexceenergy = srexceenergy + seenergy['excess']
srenergy = {'subregion': srname,
'total': srtotalenergy,
'hysteresis': srhystenergy,
'eddycurrent': sreddyenergy,
'excess': srexceenergy
}
return srenergy
def iron_losses_time_subregion(self, srname, start=0.0, end=0.0):
'''Iron power losses of a subregion
Parameters
----------
srname: str
Name of subregion
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
losses : dict
Iron power losses of the subregion
The iron losses are calculated based on the Bertotti formula
in the time domain (see also iron_lossenergy_time_se)
'''
while len(srname) < 4:
srname = srname+' '
srenergy = self.iron_lossenergy_time_subregion(srname, start, end)
time = self.times.vector[-1]-self.times.vector[0]
srlosses = {'subregion': srname,
'total': srenergy['total']/time,
'hysteresis': srenergy['hysteresis']/time,
'eddycurrent': srenergy['eddycurrent']/time,
'excess': srenergy['excess']/time
}
return srlosses
def iron_lossenergy_time(self, start=0.0, end=0.0):
'''Iron loss energy of all subregions and superelements
Parameters
----------
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
energies : list of dict
Iron loss energies per subregion
The iron losses are calculated based on the Bertotti formula
in the time domain (see also iron_lossenergy_time_se)
'''
if start != 0.0 or end != 0.0:
self.vtu_data.set_time_window(start, end)
data_list = ['b']
self.vtu_data.read_data(data_list)
self.times = TimeRange(self.vtu_data, self.nc_model)
energylist = []
for se in self.nc_model.superelements:
selossenergy = self.iron_lossenergy_time_se(se)
if se.subregion:
for sr in self.nc_model.subregions:
if se in sr.superelements:
srname = sr.name
#print(se.key, "in", sr.key, sr.name)
else:
if (se.mcvtype == 0):
center_pnt = se.elements[0].center
if (np.sqrt(center_pnt[0]**2+center_pnt[1]**2) > self.nc_model.FC_RADIUS):
srname = "no, outside"
else:
srname = "no, inside"
found = False
for srlosses in energylist:
if srlosses['subregion'] == srname:
srlosses['total'] = srlosses['total']+selossenergy['total']
srlosses['hysteresis'] = srlosses['hysteresis'] + \
selossenergy['hysteresis']
srlosses['eddycurrent'] = srlosses['eddycurrent'] + \
selossenergy['eddycurrent']
srlosses['excess'] = srlosses['excess'] + \
selossenergy['excess']
found = True
if not found:
if selossenergy['total'] > 0.0:
srlosses = {'subregion': srname,
'total': selossenergy['total'],
'hysteresis': selossenergy['hysteresis'],
'eddycurrent': selossenergy['eddycurrent'],
'excess': selossenergy['excess']
}
energylist.append(srlosses)
return energylist
def iron_losses_time(self, start=0.0, end=0.0):
'''Iron losses of all subregions and superelements
Parameters
----------
start: float
Start of the time window (optional)
end : float
End of the time window (optional)
Returns
-------
losses : list of dict
Iron losses per subregion
The iron losses are calculated based on the Bertotti formula
in the time domain (see also iron_lossenergy_time_se)
'''
energylist = self.iron_lossenergy_time(start, end)
time = self.times.vector[-1]-self.times.vector[0]
losseslist = []
for sr in energylist:
sr['total'] = sr['total']/time
sr['hysteresis'] = sr['hysteresis']/time
sr['eddycurrent'] = sr['eddycurrent']/time
sr['excess'] = sr['excess']/time
losseslist.append(sr)
return losseslist
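# Hedged usage sketch (added for illustration; not part of the original module).
# The file names and the subregion name below are assumptions and only show the
# intended call sequence of the Losses class defined above:
#
#   losses = Losses('motor.nc', 'vtu_results/')
#   p_cu = losses.ohm_powerlosses_subregion('StZa', start=0.0, end=0.02)
#   iron = losses.iron_losses_fft(start=0.0, end=0.02)
#   for entry in iron:
#       print(entry['subregion'], entry['total'])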
| 37.29716
| 99
| 0.511108
|
de410ba23e79a4f845b2f9c843a8533e017edf1c
| 2,297
|
py
|
Python
|
tests/test_api_consumer.py
|
qxl0/chain
|
92152199257e3232f72ea4326022a39326462c7f
|
[
"MIT"
] | 1
|
2022-02-10T18:59:52.000Z
|
2022-02-10T18:59:52.000Z
|
tests/test_api_consumer.py
|
qxl0/chain
|
92152199257e3232f72ea4326022a39326462c7f
|
[
"MIT"
] | null | null | null |
tests/test_api_consumer.py
|
qxl0/chain
|
92152199257e3232f72ea4326022a39326462c7f
|
[
"MIT"
] | 1
|
2022-03-18T15:35:56.000Z
|
2022-03-18T15:35:56.000Z
|
import time
import pytest
from brownie import APIConsumer, network, config
from scripts.helpful_scripts import (
LOCAL_BLOCKCHAIN_ENVIRONMENTS,
get_account,
listen_for_event,
get_contract,
fund_with_link
)
@pytest.fixture
def deploy_api_contract(get_job_id, chainlink_fee):
# Arrange / Act
api_consumer = APIConsumer.deploy(
get_contract("oracle").address,
get_job_id,
chainlink_fee,
get_contract("link_token").address,
{"from": get_account()},
)
block_confirmations = 6
if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
block_confirmations = 1
api_consumer.tx.wait(block_confirmations)
# Assert
assert api_consumer is not None
return api_consumer
def test_send_api_request_local(
deploy_api_contract,
chainlink_fee,
get_data,
):
# Arrange
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip("Only for local testing")
api_contract = deploy_api_contract
get_contract("link_token").transfer(
api_contract.address, chainlink_fee * 2, {"from": get_account()}
)
# Act
transaction_receipt = api_contract.requestVolumeData({"from": get_account()})
requestId = transaction_receipt.events["ChainlinkRequested"]["id"]
# Assert
get_contract("oracle").fulfillOracleRequest(
requestId, get_data, {"from": get_account()}
)
assert isinstance(api_contract.volume(), int)
assert api_contract.volume() > 0
def test_send_api_request_testnet(deploy_api_contract, chainlink_fee):
# Arrange
if network.show_active() not in ["kovan", "rinkeby", "mainnet"]:
pytest.skip("Only for local testing")
api_contract = deploy_api_contract
if (config["networks"][network.show_active()].get("verify", False)):
APIConsumer.publish_source(api_contract)
tx = fund_with_link(
api_contract.address, amount=chainlink_fee
)
tx.wait(1)
# Act
transaction = api_contract.requestVolumeData({"from": get_account()})
transaction.wait(1)
# Assert
event_response = listen_for_event(api_contract, "DataFullfilled")
assert event_response.event is not None
assert isinstance(api_contract.volume(), int)
assert api_contract.volume() > 0
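# Hedged note (added): with a standard Brownie project layout these tests are
# typically invoked from the project root, e.g.
#
#   brownie test tests/test_api_consumer.py --network development
#
# The network name is an assumption; any network configured for the project
# (e.g. kovan for the testnet case) can be selected via --network.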
| 29.831169
| 81
| 0.707444
|
d521db00d47d430363804d61213d9f5f53ac5abe
| 1,031
|
py
|
Python
|
hstest/test_chip_compute2.py
|
Erotemic/hotspotter
|
3cfa4015798e21385455b937f9083405c4b3cf53
|
[
"Apache-2.0"
] | 2
|
2015-07-19T02:55:06.000Z
|
2021-07-07T02:38:26.000Z
|
hstest/test_chip_compute2.py
|
Erotemic/hotspotter
|
3cfa4015798e21385455b937f9083405c4b3cf53
|
[
"Apache-2.0"
] | 5
|
2017-03-11T16:30:26.000Z
|
2021-04-10T16:42:10.000Z
|
hstest/test_chip_compute2.py
|
Erotemic/hotspotter
|
3cfa4015798e21385455b937f9083405c4b3cf53
|
[
"Apache-2.0"
] | 10
|
2015-07-19T03:05:42.000Z
|
2021-08-24T14:48:59.000Z
|
from hotspotter import HotSpotterAPI as api
from hotspotter import chip_compute2 as cc2
from hscom import argparse2
from hscom import helpers
from hscom import helpers as util
from hsviz import viz
import multiprocessing
import numpy as np # NOQA
if __name__ == '__main__':
multiprocessing.freeze_support()
# Debugging vars
chip_cfg = None
#l')=103.7900s
cx_list = None
kwargs = {}
# --- LOAD TABLES --- #
args = argparse2.parse_arguments(defaultdb='NAUTS')
hs = api.HotSpotter(args)
hs.load_tables()
hs.update_samples()
# --- LOAD CHIPS --- #
force_compute = helpers.get_flag('--force', default=False)
cc2.load_chips(hs, force_compute=force_compute)
cx = helpers.get_arg('--cx', type_=int)
if cx is not None:
#tau = np.pi * 2
#hs.change_theta(cx, tau / 8)
viz.show_chip(hs, cx, draw_kpts=False, fnum=1)
viz.show_image(hs, hs.cx2_gx(cx), fnum=2)
else:
print('usage: feature_compute.py --cx [cx]')
exec(viz.df2.present())
| 30.323529
| 62
| 0.664403
|
1b5fa800bcefc9bde23c11369e5b6f27d7c7f39c
| 15,475
|
py
|
Python
|
plugins/modules/oci_database_migration_connection_actions.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 108
|
2020-05-19T20:46:10.000Z
|
2022-03-25T14:10:01.000Z
|
plugins/modules/oci_database_migration_connection_actions.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 90
|
2020-06-14T22:07:11.000Z
|
2022-03-07T05:40:29.000Z
|
plugins/modules/oci_database_migration_connection_actions.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 42
|
2020-08-30T23:09:12.000Z
|
2022-03-25T16:58:01.000Z
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_migration_connection_actions
short_description: Perform actions on a Connection resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Connection resource in Oracle Cloud Infrastructure
- For I(action=change_compartment), used to change the Database Connection compartment.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
connection_id:
description:
- The OCID of the database connection
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The OCID of the compartment to move the resource to.
type: str
required: true
action:
description:
- The action to perform on the Connection.
type: str
required: true
choices:
- "change_compartment"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on connection
oci_database_migration_connection_actions:
# required
connection_id: "ocid1.connection.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
action: change_compartment
"""
RETURN = """
connection:
description:
- Details of the Connection resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The OCID of the resource
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- OCID of the compartment
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
database_type:
description:
- Database connection type.
returned: on success
type: str
sample: MANUAL
display_name:
description:
- Database Connection display name identifier.
returned: on success
type: str
sample: display_name_example
database_id:
description:
- The OCID of the cloud database.
returned: on success
type: str
sample: "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx"
connect_descriptor:
description:
- ""
returned: on success
type: complex
contains:
host:
description:
- Host of the connect descriptor.
returned: on success
type: str
sample: host_example
port:
description:
- Port of the connect descriptor.
returned: on success
type: int
sample: 56
database_service_name:
description:
- Database service name.
returned: on success
type: str
sample: database_service_name_example
connect_string:
description:
- Connect string.
returned: on success
type: str
sample: connect_string_example
credentials_secret_id:
description:
- OCID of the Secret in the OCI vault containing the Database Connection credentials.
returned: on success
type: str
sample: "ocid1.credentialssecret.oc1..xxxxxxEXAMPLExxxxxx"
certificate_tdn:
description:
- This name is the distinguished name used while creating the certificate on target database.
returned: on success
type: str
sample: certificate_tdn_example
ssh_details:
description:
- ""
returned: on success
type: complex
contains:
host:
description:
- Name of the host the SSH key is valid for.
returned: on success
type: str
sample: host_example
user:
description:
- SSH user
returned: on success
type: str
sample: user_example
sudo_location:
description:
- Sudo location
returned: on success
type: str
sample: sudo_location_example
admin_credentials:
description:
- ""
returned: on success
type: complex
contains:
username:
description:
- Administrator username
returned: on success
type: str
sample: username_example
private_endpoint:
description:
- ""
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment to contain the
private endpoint.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
vcn_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the VCN where the Private Endpoint will be bound
to.
returned: on success
type: str
sample: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the customer's
subnet where the private endpoint VNIC will reside.
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
id:
description:
- L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a previously created Private Endpoint.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
vault_details:
description:
- ""
returned: on success
type: complex
contains:
compartment_id:
description:
- OCID of the compartment where the secret containing the credentials will be created.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
vault_id:
description:
- OCID of the vault
returned: on success
type: str
sample: "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx"
key_id:
description:
- OCID of the vault encryption key
returned: on success
type: str
sample: "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the Connection resource.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- A message describing the current state in more detail. For example, can be used to provide actionable information
for a resource in Failed state.
returned: on success
type: str
sample: lifecycle_details_example
time_created:
description:
- The time the Connection resource was created. An RFC3339 formatted datetime string.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time of the last Connection resource details update. An RFC3339 formatted datetime string.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"database_type": "MANUAL",
"display_name": "display_name_example",
"database_id": "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx",
"connect_descriptor": {
"host": "host_example",
"port": 56,
"database_service_name": "database_service_name_example",
"connect_string": "connect_string_example"
},
"credentials_secret_id": "ocid1.credentialssecret.oc1..xxxxxxEXAMPLExxxxxx",
"certificate_tdn": "certificate_tdn_example",
"ssh_details": {
"host": "host_example",
"user": "user_example",
"sudo_location": "sudo_location_example"
},
"admin_credentials": {
"username": "username_example"
},
"private_endpoint": {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"vcn_id": "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
},
"vault_details": {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx"
},
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.database_migration import DatabaseMigrationClient
from oci.database_migration.models import ChangeConnectionCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ConnectionActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
change_compartment
"""
@staticmethod
def get_module_resource_id_param():
return "connection_id"
def get_module_resource_id(self):
return self.module.params.get("connection_id")
def get_get_fn(self):
return self.client.get_connection
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_connection,
connection_id=self.module.params.get("connection_id"),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeConnectionCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_connection_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
connection_id=self.module.params.get("connection_id"),
change_connection_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
ConnectionActionsHelperCustom = get_custom_class("ConnectionActionsHelperCustom")
class ResourceHelper(ConnectionActionsHelperCustom, ConnectionActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
connection_id=dict(aliases=["id"], type="str", required=True),
compartment_id=dict(type="str", required=True),
action=dict(type="str", required=True, choices=["change_compartment"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="connection",
service_client_class=DatabaseMigrationClient,
namespace="database_migration",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 36.411765
| 160
| 0.56504
|
4f241bb0eecb594e51f00d5fce47f958adaa9fae
| 1,704
|
py
|
Python
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_08_01/models/_application_insights_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-01-24T08:54:57.000Z
|
2022-01-24T08:54:57.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_08_01/models/_application_insights_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_08_01/models/_application_insights_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class CategoryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WORKBOOK = "workbook"
TSG = "TSG"
PERFORMANCE = "performance"
RETENTION = "retention"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class Kind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of workbook. Only valid value is shared.
"""
USER = "user"
SHARED = "shared"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of managed service identity (where both SystemAssigned and UserAssigned types are
allowed).
"""
NONE = "None"
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
class SharedTypeKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of workbook. Only valid value is shared.
"""
USER = "user"
SHARED = "shared"
| 32.150943
| 94
| 0.663732
|
04b8014ea95e305e90c33f41f9a47e39addb271b
| 1,795
|
py
|
Python
|
setup.py
|
csmith/docker-rerun
|
de31a64b5eb43cc3644354bb5980e22e0ee9e7a4
|
[
"MIT"
] | 4
|
2017-11-23T09:50:35.000Z
|
2020-08-25T12:42:22.000Z
|
setup.py
|
csmith/docker-rerun
|
de31a64b5eb43cc3644354bb5980e22e0ee9e7a4
|
[
"MIT"
] | 1
|
2016-12-28T19:30:40.000Z
|
2016-12-31T02:24:04.000Z
|
setup.py
|
csmith/docker-rerun
|
de31a64b5eb43cc3644354bb5980e22e0ee9e7a4
|
[
"MIT"
] | 3
|
2016-12-28T20:36:30.000Z
|
2021-02-08T11:24:16.000Z
|
"""Setuptools based setup module for docker-rerun."""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='docker-rerun',
version='0.1.1',
description='Command-line tool to re-run a docker container',
long_description='docker-rerun is a small utility script that makes it ' \
'easy to re-run docker containers using the same ' \
'arguments you used previously.' \
'\n\n' \
'Want to update to a newer image, or add a missing port ' \
'publication? docker-rerun’s got you covered.' \
'\n\n' \
'See the GitHub project_ for more info.' \
'\n\n' \
'.. _project: https://github.com/csmith/docker-rerun',
url='https://github.com/csmith/docker-rerun',
author='Chris Smith',
author_email='chris87@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
keywords='docker container',
py_modules=["docker_rerun"],
install_requires=[],
test_suite='nose.collector',
extras_require={
'dev': ['pylint'],
'test': ['coverage', 'nose', 'python-coveralls'],
},
entry_points={
'console_scripts': [
'docker-rerun=docker_rerun:entrypoint',
],
},
)
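# Hedged usage note (added): after installation the console script declared in
# entry_points above becomes available on the PATH, e.g.
#
#   pip install .
#   docker-rerun --help
#
# The actual CLI options are defined by docker_rerun:entrypoint, not by this
# setup script.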
| 28.492063
| 80
| 0.567131
|
786ed40f07c446a6a75399460163792170141767
| 410
|
py
|
Python
|
tests/test_base.py
|
StevenKangWei/musicsa
|
485894f0c7494163cf2637542729be75c789262c
|
[
"MIT"
] | null | null | null |
tests/test_base.py
|
StevenKangWei/musicsa
|
485894f0c7494163cf2637542729be75c789262c
|
[
"MIT"
] | null | null | null |
tests/test_base.py
|
StevenKangWei/musicsa
|
485894f0c7494163cf2637542729be75c789262c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import sys
import unittest
dirname = os.path.dirname(os.path.abspath(__file__))
project = os.path.abspath(os.path.join(dirname, '../musicsa'))
if project not in sys.path:
sys.path.insert(0, project)
class BaseTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@staticmethod
def main():
return unittest.main()
| 17.083333
| 62
| 0.665854
|
09c4912fb7c3840aba8d478a284b923b23a2bc60
| 83
|
py
|
Python
|
bc1/__init__.py
|
jpercent/bc
|
ed8b91543f2854972bcbcc7f6f84cf78fabcf33f
|
[
"FSFAP"
] | null | null | null |
bc1/__init__.py
|
jpercent/bc
|
ed8b91543f2854972bcbcc7f6f84cf78fabcf33f
|
[
"FSFAP"
] | null | null | null |
bc1/__init__.py
|
jpercent/bc
|
ed8b91543f2854972bcbcc7f6f84cf78fabcf33f
|
[
"FSFAP"
] | null | null | null |
from .pyflex import lex, yacc
from .bc import *
__author__ = 'jpercent'
#del bc
| 10.375
| 29
| 0.698795
|
e226f811b5337de76fe5dcd1fee6ffcd9a7beb45
| 404
|
py
|
Python
|
ros/lib/host_inventory.py
|
RedHatInsights/resource-optimization-test
|
b94f29964e26e42a930f1ca589db80ed317afa0f
|
[
"Apache-2.0"
] | null | null | null |
ros/lib/host_inventory.py
|
RedHatInsights/resource-optimization-test
|
b94f29964e26e42a930f1ca589db80ed317afa0f
|
[
"Apache-2.0"
] | null | null | null |
ros/lib/host_inventory.py
|
RedHatInsights/resource-optimization-test
|
b94f29964e26e42a930f1ca589db80ed317afa0f
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from ros.config import INVENTORY_ADDRESS
def fetch_host_from_inventory(insights_id, rh_identity):
host_api_url = f"{INVENTORY_ADDRESS}/api/inventory/v1/hosts?insights_id={insights_id}"
headers = {'x-rh-identity': rh_identity, 'Content-Type': 'application/json'}
res = requests.get(host_api_url, headers=headers)
hosts = json.loads(res.text)
return hosts
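# Hedged usage sketch (added): the identity header is normally a base64-encoded
# JSON identity supplied by the platform; both values below are placeholders,
# not real data.
#
#   hosts = fetch_host_from_inventory(
#       "6f3f0342-0882-4f32-9b6f-533c6c954691",  # insights_id (example UUID)
#       "eyJpZGVudGl0eSI6IHsuLi59fQ==",           # x-rh-identity (placeholder)
#   )
#   print(hosts.get("total"), "host(s) matched")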
| 33.666667
| 90
| 0.762376
|
af46d81a2705addc137f456543569bed6f6da6f7
| 4,256
|
py
|
Python
|
tests/instrumentation/pymssql_tests.py
|
dpaluch-rp/apm-agent-python
|
8b11d232f37c0affe0a7c92f590b05106c55b3b3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/instrumentation/pymssql_tests.py
|
dpaluch-rp/apm-agent-python
|
8b11d232f37c0affe0a7c92f590b05106c55b3b3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/instrumentation/pymssql_tests.py
|
dpaluch-rp/apm-agent-python
|
8b11d232f37c0affe0a7c92f590b05106c55b3b3
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pytest
from elasticapm.conf.constants import TRANSACTION
from elasticapm.instrumentation.packages.pymssql import get_host_port
from elasticapm.utils import default_ports
pymssql = pytest.importorskip("pymssql")
pytestmark = [pytest.mark.pymssql]
if "MSSQL_HOST" not in os.environ:
pytestmark.append(pytest.mark.skip("Skipping MS-SQL tests, no MSSQL_HOST environment variable set"))
@pytest.yield_fixture(scope="function")
def pymssql_connection(request):
conn = pymssql.connect(
os.environ.get("MSSQL_HOST", "localhost"),
os.environ.get("MSSQL_USER", "SA"),
os.environ.get("MSSQL_PASSWORD", ""),
os.environ.get("MSSQL_DATABASE", "tempdb"),
)
cursor = conn.cursor()
cursor.execute(
"CREATE TABLE test(id INT, name NVARCHAR(5) NOT NULL);"
"INSERT INTO test VALUES (1, 'one'), (2, 'two'), (3, 'three');"
)
yield conn
# cleanup
conn.rollback()
@pytest.mark.integrationtest
def test_pymssql_select(instrument, pymssql_connection, elasticapm_client):
cursor = pymssql_connection.cursor()
query = "SELECT * FROM test WHERE name LIKE 't%' ORDER BY id"
try:
elasticapm_client.begin_transaction("web.django")
cursor.execute(query)
assert cursor.fetchall() == [(2, "two"), (3, "three")]
elasticapm_client.end_transaction(None, "test-transaction")
finally:
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
span = spans[0]
assert span["name"] == "SELECT FROM test"
assert span["type"] == "db"
assert span["subtype"] == "pymssql"
assert span["action"] == "query"
assert "db" in span["context"]
assert span["context"]["db"]["type"] == "sql"
assert span["context"]["db"]["statement"] == query
assert span["context"]["destination"] == {
"address": "mssql",
"port": default_ports["mssql"],
"service": {"name": "mssql", "resource": "mssql", "type": "db"},
}
@pytest.mark.parametrize(
"args,kwargs,expected",
[
(("localhost",), {"port": 1234}, {"host": "localhost", "port": 1234}),
(("localhost",), {}, {"host": "localhost", "port": default_ports["mssql"]}),
((), {"host": "localhost,1234"}, {"host": "localhost", "port": 1234}),
((), {"host": "localhost:1234"}, {"host": "localhost", "port": 1234}),
],
)
def test_host_port_parsing(args, kwargs, expected):
host, port = get_host_port(args, kwargs)
assert host == expected["host"]
assert port == expected["port"]
| 39.045872
| 104
| 0.679746
|
024fc593ba277a7e4baa5b8a88caa218aaffd3aa
| 1,179
|
py
|
Python
|
networks/network_utils.py
|
ademiadeniji/lords
|
75ce115ec7f950d857d0817eb0adf2cc2673ffdd
|
[
"Apache-2.0"
] | null | null | null |
networks/network_utils.py
|
ademiadeniji/lords
|
75ce115ec7f950d857d0817eb0adf2cc2673ffdd
|
[
"Apache-2.0"
] | null | null | null |
networks/network_utils.py
|
ademiadeniji/lords
|
75ce115ec7f950d857d0817eb0adf2cc2673ffdd
|
[
"Apache-2.0"
] | null | null | null |
"""Network utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def task_multiplex(inputs, z, num_tasks):
"""The multiplex module for multitask data.
Args:
inputs: Tensor of shape [batch_size, ..., num_tasks * dim_outputs].
z: The integer task index of shape [batch_size].
num_tasks: The number of tasks.
Returns:
A tensor of shape [batch_size, ..., dim_outputs].
"""
# dim_inputs = num_tasks * dim_outputs
dim_inputs = int(inputs.shape[-1])
assert dim_inputs % num_tasks == 0
dim_outputs = int(dim_inputs / num_tasks)
new_shape = tf.concat(
[tf.shape(inputs)[:-1], [num_tasks, dim_outputs]],
axis=-1)
state = tf.reshape(inputs, new_shape)
# [batch_size, ..., num_tasks, dim_outputs]
state = tf.stack(tf.unstack(state, axis=-2), axis=1)
# [batch_size, num_tasks, ..., dim_outputs]
indices = tf.expand_dims(z, axis=-1)
# [batch_size, 1]
state = tf.gather_nd(
state,
indices,
batch_dims=1)
# [batch_size, ..., dim_outputs]
return state
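# Hypothetical usage sketch (not part of the original module): with
# batch_size=2, num_tasks=3 and dim_outputs=4, each example keeps only the
# 4-dim slice that belongs to its task index in z.
if __name__ == '__main__':
    example_inputs = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 12])
    example_z = tf.constant([0, 2], dtype=tf.int32)
    selected = task_multiplex(example_inputs, example_z, num_tasks=3)
    print(selected.shape)  # expected: (2, 4)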
| 28.756098
| 75
| 0.644614
|
dfb10b58d22903dfe3db3d9ca6dcf6bdae336c01
| 4,037
|
py
|
Python
|
src/upload_images_s3.py
|
NVIDIA-AI-IOT/deepstream-fpfilter
|
e00d889e18e618e32ff0020afa1a70496e739516
|
[
"MIT"
] | 6
|
2021-11-03T15:14:21.000Z
|
2022-03-22T12:32:41.000Z
|
src/upload_images_s3.py
|
NVIDIA-AI-IOT/deepstream-fpfilter
|
e00d889e18e618e32ff0020afa1a70496e739516
|
[
"MIT"
] | null | null | null |
src/upload_images_s3.py
|
NVIDIA-AI-IOT/deepstream-fpfilter
|
e00d889e18e618e32ff0020afa1a70496e739516
|
[
"MIT"
] | 2
|
2021-09-23T19:11:41.000Z
|
2021-12-22T00:06:41.000Z
|
'''
Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
import boto3
from botocore.exceptions import ClientError
import logging
import os
from os import environ
import sys
from os import listdir
from os.path import isfile, join
'''
APIs to upload images to an S3 bucket.
To use these APIs, the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables need to be set,
or
a ~/.aws/config file should be created that includes the access key and secret access key info. A sample config looks like this:
[default]
aws_access_key_id = <your username here>
aws_secret_access_key = <your S3 API key here>
Also, set the region, bucket name and endpoint URL below to upload to the S3 bucket.
'''
DEFAULT_LOCATION = "<region name>"  # placeholder: fill in your region
BUCKET_NAME = "<name of the bucket to upload images>"  # placeholder: fill in your bucket name
ENDPOINT_URL = "<endpoint url>"  # placeholder: fill in your endpoint URL
s3 = boto3.client('s3', region_name=DEFAULT_LOCATION, endpoint_url=ENDPOINT_URL)
def get_bucket_list():
'''
returns list of buckets.
'''
response = s3.list_buckets()
return [dict['Name'] for dict in response['Buckets']]
def create_bucket(bucket_name):
'''
Creates a bucket. Versioning is disabled by default.
'''
response = s3.list_buckets()
buckets_dict_list = response['Buckets']
for dict_item in buckets_dict_list:
if dict_item["Name"] == bucket_name:
print('bucket with name {} already exists'.format(bucket_name))
return True
try:
location = {'LocationConstraint': DEFAULT_LOCATION}
s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)
except ClientError as e:
logging.error(e)
return False
return True
def get_file_list_in_bucket(bucket_name):
'''
Returns list of files in s3 bucket.
'''
obj_info = s3.list_objects(Bucket=bucket_name)
if 'Contents' not in obj_info:
return []
return [dict['Key'] for dict in obj_info['Contents']]
def upload_file_to_bucket(bucket_name, file_name, object_name=None):
'''
Uploads file to bucket.
'''
if object_name is None:
object_name = os.path.basename(file_name)
print("uploading ", file_name)
try:
response = s3.upload_file(file_name, bucket_name, object_name)
except ClientError as e:
logging.error(e)
return False
return True
def delete_file_in_bucket(bucket_name, object_name):
'''
Deletes file from the bucket.
'''
s3.delete_object(Bucket=bucket_name, Key=object_name)
def clear_bucket(bucket_name):
'''
Deletes all files from the bucket.
'''
s3_res = boto3.resource('s3')
bucket = s3_res.Bucket(bucket_name)
bucket.objects.all().delete()
def delete_bucket(bucket_name):
clear_bucket(bucket_name)
s3.delete_bucket(Bucket=bucket_name)
if __name__ == '__main__':
print(sys.argv)
create_bucket(BUCKET_NAME)
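# Assumption (not documented in the original script): sys.argv[1] is a
# filename pattern such as "frame_%d.jpg" and sys.argv[3] is the integer
# index substituted into it via the "%" formatting below.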
if upload_file_to_bucket(BUCKET_NAME, sys.argv[1]%int(sys.argv[3])):
print("Uploading success")
else:
print("Uploading failed")
| 31.294574
| 113
| 0.72653
|
ec105dfaeb0f292faca13f02fbb9755da8605aba
| 251,311
|
py
|
Python
|
tensorflow/python/ops/image_ops_test.py
|
TheRakeshPurohit/tensorflow
|
bee6d5a268122df99e1e55a7b92517e84ad25bab
|
[
"Apache-2.0"
] | 1
|
2022-03-18T17:36:11.000Z
|
2022-03-18T17:36:11.000Z
|
tensorflow/python/ops/image_ops_test.py
|
TheRakeshPurohit/tensorflow
|
bee6d5a268122df99e1e55a7b92517e84ad25bab
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/image_ops_test.py
|
TheRakeshPurohit/tensorflow
|
bee6d5a268122df99e1e55a7b92517e84ad25bab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
import colorsys
import contextlib
import functools
import itertools
import math
import os
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config as tf_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
def testRGBToHSVDataTypes(self):
# Test case for GitHub issue 54855.
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for dtype in [
dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16
]:
with self.cached_session(use_gpu=False):
rgb = math_ops.cast(
np.array(data, np.float32).reshape([2, 2, 3]) / 255., dtype=dtype)
hsv = image_ops.rgb_to_hsv(rgb)
val = image_ops.hsv_to_rgb(hsv)
out = self.evaluate(val)
self.assertAllClose(rgb, out, atol=1e-2)
class RGBToYIQTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in range(images.shape[0]):
for y in range(images.shape[1]):
for x in range(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session():
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a two dimensional
# input is used, i.e. the images have shape [height, width]
with self.cached_session():
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "must be at least two-dimensional"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session():
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session():
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session():
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session():
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image = image_ops.adjust_gamma(x, gamma=y)
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(self.evaluate(y))
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = self.evaluate(y)
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
def test_adjust_gamma_one_uint8(self):
self._test_adjust_gamma_uint8(1.0)
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=1.0)
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
self._test_adjust_gamma_float32(2.0)
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in range(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
def testInvalidDeltaValue(self):
"""Delta value must be in the inetrval of [-1,1]."""
if not context.executing_eagerly():
self.skipTest("Eager mode only")
else:
with self.cached_session():
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = r"delta must be in the interval \[-1, 1\]"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_hue(x, delta=1.5)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in range(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in range(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in range(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale))
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRightStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
)
def testRandomFlipStateless(self, func):
with test_util.use_gpu():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
if y_tf_eval[0][0] == 1:
self.assertAllEqual(y_tf_eval, x_np)
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval, y_np)
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
# Verify that results are deterministic.
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
)
def testRandomFlipStatelessWithBatch(self, func):
with test_util.use_gpu():
batch_size = 16
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
for j in range(batch_size):
if y_tf_eval[j][0][0] == 1:
self.assertAllEqual(y_tf_eval[j], x_np[j])
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval[j], y_np[j])
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDownStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
# Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegex(ValueError, "must be > 0"):
op(p_zero_dim)
# Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegex(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testFlipImageUnknownShape(self):
expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
[[9, 10, 11], [6, 7, 8]]]])
def generator():
image_input = np.array(
[[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
yield image_input
dataset = dataset_ops.Dataset.from_generator(
generator,
output_types=dtypes.int32,
output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
dataset = dataset.map(image_ops.flip_left_right)
image_flipped_via_dataset_map = get_single_element.get_single_element(
dataset.take(1))
self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
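  # For reference, the NumPy helper above mirrors what the tests expect
  # adjust_contrast to compute: a per-image, per-channel mean over the spatial
  # dimensions, followed by
  #   y = mean + contrast_factor * (x - mean)
  # e.g. with contrast_factor=2.0, a pixel 10 above the mean ends up 20 above it.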
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"contrast_factor must be scalar|"
"Shape must be rank 0 but is rank 1"):
image_ops.adjust_contrast(x_np, [2.0])
@test_util.run_in_graph_and_eager_modes
def testDeterminismUnimplementedExceptionThrowing(self):
"""Test d9m-unimplemented exception-throwing when op-determinism is enabled.
This test depends upon other tests, tests which do not enable
op-determinism, to ensure that determinism-unimplemented exceptions are not
erroneously thrown when op-determinism is not enabled.
"""
if test_util.is_xla_enabled():
self.skipTest('XLA implementation does not raise exception')
with self.session(), test_util.deterministic_ops():
input_shape = (1, 2, 2, 1)
on_gpu = len(tf_config.list_physical_devices("GPU"))
# AdjustContrast seems to now be inaccessible via the Python API.
# AdjustContrastv2 only supports float16 and float32 on GPU, and other
# types are converted to and from float32 at the Python level before
# AdjustContrastv2 is called.
dtypes_to_test = [
dtypes.uint8, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.float32,
dtypes.float64
]
if on_gpu:
dtypes_to_test.append(dtypes.float16)
ctx_mgr = self.assertRaisesRegex(
errors.UnimplementedError,
"A deterministic GPU implementation of AdjustContrastv2 is not" +
" currently available.")
else:
ctx_mgr = contextlib.suppress()
for dtype in dtypes_to_test:
input_images = array_ops.zeros(input_shape, dtype=dtype)
contrast_factor = 1.
with ctx_mgr:
output_images = image_ops.adjust_contrast(input_images,
contrast_factor)
self.evaluate(output_images)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
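  # The expected values in the tests below correspond to adding `delta` to every
  # channel: the uint8 cases saturate at 255 (255 + 10 -> 255), while the float
  # cases keep the overflowed value (255 + 10 -> 265, before the / 255. scaling).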
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
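  # For reference, the NumPy helper above implements the standardization the
  # tests compare against:
  #   y = (x - mean(x)) / max(std(x), 1 / sqrt(num_pixels))
  # The lower bound on the divisor keeps uniform images (std == 0) finite, which
  # is what testUniformImage below checks.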
@parameterized.named_parameters([("_int8", np.int8), ("_int16", np.int16),
("_int32", np.int32), ("_int64", np.int64),
("_uint8", np.uint8), ("_uint16", np.uint16),
("_uint32", np.uint32),
("_uint64", np.uint64),
("_float32", np.float32)])
def testBasic(self, data_type):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session():
x = constant_op.constant(x_np, dtype=data_type, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session():
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session():
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
with self.cached_session():
return self.evaluate(y)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (
([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], r"height must be >= target \+ offset"),
([0, 2, 3, 3], r"width must be >= target \+ offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
    # Test case for GitHub issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
      # Test fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
      # Test fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
def testCentralFractionTensor(self):
# Test case for GitHub issue 45324.
x_shape = [240, 320, 3]
y_shape = [80, 106, 3]
@def_function.function(autograph=False)
def f(x, central_fraction):
return image_ops.central_crop(x, central_fraction)
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
y_tf = self.evaluate(f(x_np, constant_op.constant(0.33)))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box(*args)
with self.cached_session():
return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
target_height, target_width))
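  # Note: wrapping the op in a tf.function above lets the testBadParams*
  # variants below exercise three error paths: plain scalars and eager tensors
  # are validated in Python, while graph tensors fall back to shape inference.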
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session():
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParamsScalarInputs(self):
# In this test, inputs do not get converted to tensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the scalars.
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[False])
def testBadParamsTensorInputsEager(self):
# In this test inputs get converted to EagerTensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the tensor's values.
with context.eager_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[True])
@parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)),
("OffsetWidth", (0, -1, 4, 4)),
("Height", (2, 0, 4, 4)),
("Width", (0, 2, 4, 4))])
def testBadParamsTensorInputsGraph(self, config):
# In this test inputs get converted to tensors before calling the
# tf.function. The error message here is raised during shape inference.
with context.graph_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
self._assertRaises(
x,
x_shape,
*config,
"Paddings must be non-negative",
use_tensor_inputs_options=[True])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
def testInvalidInput(self):
# Test case for GitHub issue 46890.
if test_util.is_xla_enabled():
# TODO(b/200850176): test fails with XLA.
return
with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
v = image_ops.pad_to_bounding_box(
image=np.ones((1, 1, 1)),
target_height=5191549470,
target_width=5191549470,
offset_height=1,
offset_width=1)
self.evaluate(v)
class InternalPadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _InternalPadToBoundingBox(self, x, offset_height, offset_width,
target_height, target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box_internal(*args, check_dims=False)
with self.cached_session():
return self.evaluate(
pad_bbox(x_tensor, offset_height, offset_width, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._InternalPadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box_internal(
image, 0, 0, height, width, check_dims=False)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box_internal(
x, i[0], i[1], i[2], i[3], check_dims=False)
with self.cached_session():
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box_internal(
image, 0, 0, 55, 66, check_dims=False)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.cached_session():
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in range(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_t = ops.convert_to_tensor(min_object_covered)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_t,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in range(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
    # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op should
    # be fixed to not use rejection sampling and generate correctly uniform
    # aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
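    # For this configuration that works out to
    # (3/40, 2/50, 13/40, 12/50) = (0.075, 0.04, 0.325, 0.24).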
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# By default min_object_covered=0.1 if not provided
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered,
aspect_ratio_range, area_range):
with test_util.use_gpu():
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
iterations = 2
test_seeds = [(1, 2), (3, 4), (5, 6)]
for seed in test_seeds:
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
for _ in range(iterations):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(bounding_box_np,
dtype=dtypes.float32,
shape=bounding_box_np.shape)
begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
seed=seed,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratio = area / original_area
area_ratios.append(area_ratio)
fraction_object_covered.append(
float(np.sum(y_tf)) / bounding_box_area)
# Check that `area_ratio` is within valid range.
self.assertLessEqual(area_ratio, area_range[1])
self.assertGreaterEqual(area_ratio, area_range[0])
        # Each array should consist of one value just repeated `iterations` times
        # because the same seed is used.
self.assertEqual(len(set(aspect_ratios)), 1)
self.assertEqual(len(set(area_ratios)), 1)
self.assertEqual(len(set(fraction_object_covered)), 1)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWholeImageBoundingBoxStateless(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWithBoundingBoxStateless(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
# Test both scalar and tensor input for `min_object_covered`.
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShapeStateless(self):
with test_util.use_gpu():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
bbox_func = functools.partial(
image_ops.stateless_sample_distorted_bounding_box,
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Check error is raised with wrong seed shapes.
for seed in [1, (1, 2, 3)]:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
begin, end, bbox_for_drawing = bbox_func(seed=seed)
test_seed = (1, 2)
begin, end, bbox_for_drawing = bbox_func(seed=test_seed)
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
self.assertAllEqual([3], begin.shape)
self.assertAllEqual([3], end.shape)
self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)
def testDeterminismExceptionThrowing(self):
with test_util.deterministic_ops():
with self.assertRaisesRegex(
ValueError, "requires a non-zero seed to be passed in when "
"determinism is enabled"):
image_ops_impl.sample_distorted_bounding_box_v2(
image_size=[50, 50, 1],
bounding_boxes=[[[0., 0., 1., 1.]]],
)
image_ops_impl.sample_distorted_bounding_box_v2(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)
with self.assertRaisesRegex(
ValueError, 'requires "seed" or "seed2" to be non-zero when '
"determinism is enabled"):
image_ops_impl.sample_distorted_bounding_box(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]])
image_ops_impl.sample_distorted_bounding_box(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)
class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
  # Some resize methods, such as Gaussian, are non-interpolating in that they
  # change the image even if there is no scale change. For some tests, we only
  # check the values on the value-preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
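  # Tests such as testNoOp below therefore only assert value preservation for
  # INTERPOLATING_METHODS; non-interpolating methods are only checked for shape.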
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images_v2(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
for legacy_method, new_method in methods_to_test:
with self.cached_session():
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
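        # The scale passed to scale_and_translate is output size / input size,
        # here [6, 4] / [3, 2] = [2.0, 2.0], so both code paths perform the same
        # 2x upscaling.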
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
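        # With preserve_aspect_ratio=True the 100x80 input is scaled by the
        # limiting factor 2.5 to 250x200; with it disabled the output is
        # exactly 250x250.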
        expect_shape = ([10, 250, 200, 10]
                        if preserve_aspect_ratio else [10, 250, 250, 10])
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
def testLargeDim(self):
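    # The requested 1610637938 x 1610637938 output would overflow the maximum
    # number of elements a tensor can hold, so the resize is expected to fail
    # with InvalidArgumentError rather than attempt the allocation.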
with self.session():
with self.assertRaises(errors.InvalidArgumentError):
x = np.ones((5, 1, 1, 2))
v = image_ops.resize_images_v2(x, [1610637938, 1610637938],
image_ops.ResizeMethod.BILINEAR)
_ = self.evaluate(v)
class ResizeImagesTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing with a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing with a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images(image, target_shape, target_method)
        if (target_method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
            target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
      with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = [max_h, max_w]
x_tensor = x
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(y)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
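    # The 2x4 input is scaled down to 1x2 before zero-padding to 1x4; the
    # legacy (v1) resize samples without half-pixel centers, which is
    # consistent with the values 1 and 3 being picked from the top-left of
    # each 2x2 block.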
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
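    # The 2x4 input is scaled down to 1x2 before zero-padding to 1x4; with the
    # v2 (half-pixel-centers) bilinear resize each output value is the mean of
    # a 2x2 block: (1+2+5+6)/4 = 3.5 and (3+4+7+8)/4 = 5.5.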
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def resize_crop_or_pad(*args):
return image_ops.resize_image_with_crop_or_pad(*args)
with self.cached_session():
return self.evaluate(
resize_crop_or_pad(x_tensor, target_height, target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
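    # resize_image_with_crop_or_pad centers the content: padding is split
    # evenly between the two sides, with any extra row/column going to the
    # bottom/right, as the expected outputs below illustrate.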
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
    # The original error message does not contain backslashes. However, they
    # are added by either the assert op or the runtime. If this behavior
    # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def simple_color_ramp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
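  # Red ramps down the rows, green ramps across the columns, and blue is
  # (roughly) their average, producing smooth gradients that compress
  # predictably in the JPEG/PNG tests below.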
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session():
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
    with self.cached_session():
      # Read a real jpeg, then decode and crop it in several ways.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window, channels=3)
# Combined decode+crop should have the same shape inference on image
# sizes.
image1_shape = image1_crop.get_shape().as_list()
image2_shape = image2.get_shape().as_list()
self.assertAllEqual(image1_shape, image2_shape)
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
    with self.cached_session():
      # Read a real jpeg, then try to decode it with invalid crop windows.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Invalid JPEG data or crop window"):
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
self.evaluate(result)
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session():
      # Compare decoding with dct_method=INTEGER_FAST against the default;
      # the results should be the same.
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
self.assertAllEqual(image_shape, [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
      # A CMYK jpeg image has 4 channels.
self.assertAllEqual(image_shape, [256, 128, 4])
def testRandomJpegQuality(self):
    # A previous implementation of random_jpeg_quality had a bug; this unit
    # test exercises the fixed version. Due to forward compatibility, the test
    # can only run when the fixed version is in use.
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session() as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
  # TODO(b/162345082): stateless random ops generate different random numbers
  # with xla_gpu. Update tests such that there is a single ground-truth result
  # to test against.
def testStatelessRandomJpegQuality(self):
# Test deterministic randomness in jpeg quality by checking that the same
# sequence of jpeg quality adjustments are returned each round given the
# same seed.
with test_util.use_gpu():
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
jpeg_quality = (40, 100)
seeds_list = [(1, 2), (3, 4)]
iterations = 2
random_jpeg_images_all = [[] for _ in range(iterations)]
for random_jpeg_images in random_jpeg_images_all:
for seed in seeds_list:
distorted_jpeg = image_ops.stateless_random_jpeg_quality(
image, jpeg_quality[0], jpeg_quality[1], seed=seed)
# Verify that the random jpeg image is different from the original
# jpeg image.
self.assertNotAllEqual(image, distorted_jpeg)
random_jpeg_images.append(self.evaluate(distorted_jpeg))
# Verify that the results are identical given the same seed.
for i in range(1, iterations):
self.assertAllEqual(random_jpeg_images_all[0],
random_jpeg_images_all[i])
def testAdjustJpegQuality(self):
    # Test that image_ops.adjust_jpeg_quality works when jpeg quality
    # is an int (not a tensor), for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session() as sess:
sess.run(adjust_jpeg_quality_image)
def testAdjustJpegQualityShape(self):
with self.cached_session():
image = constant_op.constant(
np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
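      # The round trip through JPEG encode/decode drops the static height and
      # width, so only the channel dimension is expected to remain known.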
adjusted_image.shape.assert_is_compatible_with([None, None, 3])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session():
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
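    # The ground truth built below is a 5-pixel-wide white stripe on a black
    # background that sweeps across the 20-pixel width during the first 4
    # frames and then down the 40-pixel height during the remaining 8 frames.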
with self.cached_session():
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
def testAnimatedGif(self):
    # Test that all frames in the animated GIF file are properly decoded.
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
gt_frame1 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame1.png"))
gt_frame2 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame2.png"))
image = image_ops.decode_gif(gif)
frame0 = image_ops.decode_png(gt_frame0)
frame1 = image_ops.decode_png(gt_frame1)
frame2 = image_ops.decode_png(gt_frame2)
image, frame0, frame1, frame2 = self.evaluate([image, frame0, frame1,
frame2])
# Compare decoded gif frames with ground-truth data.
self.assertAllEqual(image[0], frame0)
self.assertAllEqual(image[1], frame1)
self.assertAllEqual(image[2], frame2)
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session():
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
def testNoConvert(self):
# Tests with Tensor.op requires a graph.
with ops.Graph().as_default():
# Make sure converting to the same data type creates only an identity op
with self.cached_session():
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately.
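    # The expected values below are consistent with convert_image_dtype
    # scaling integer images by the ratio of the two types' ranges, e.g.
    # uint8 -> int16 multiplies by (32767 + 1) // (255 + 1) == 128, so 255
    # maps to 255 * 128.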
with self.cached_session():
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting.
with self.cached_session():
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.cached_session():
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.cached_session():
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.cached_session():
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
      # Assert that the results are as expected within some small error bound
      # in case they are float values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
      # This should not occur in this test code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
# TODO(b/133851381): re-enable this test.
def disabledtestTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var) # pylint: disable=invalid-unary-operand-type
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
images = {}
for name, decode in decoders.items():
image = self.evaluate(decode(contents))
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
  # inputs. Due to failures, we create another test, `testInvalidTensorInput`,
  # which is identical to this one except that the input here is a scalar as
# opposed to a tensor.
def testInvalidPyInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Due to failures, creating another this test which is identical to
# `testInvalidPyInput` except that the input is a tensor here as opposed
# to a scalar.
def testInvalidTensorInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = ops.convert_to_tensor(2**31)
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testNonMaxSuppression(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
def nms_func(box, score, max_output_size, iou_thres):
return image_ops.non_max_suppression(box, score, max_output_size,
iou_thres)
max_output_size = 3
iou_thres = 0.5
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
    # The last dimension of each box must be 4 (but is 3).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
    # The boxes are of shape [num_boxes, 4] and the scores are of shape
    # [num_boxes], so an error will be thrown because 1 != 2.
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
# The scores should be 1D of shape [num_boxes].
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
nms_func(boxes, scores, max_output_size, iou_thres)
# The max output size should be a scalar (0-D).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, [[max_output_size]], iou_thres)
# The iou_threshold should be a scalar (0-D).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, [[iou_thres]])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testTensors(self):
with context.eager_mode():
boxes_tensor = constant_op.constant([[6.625, 6.688, 272., 158.5],
[6.625, 6.75, 270.5, 158.4],
[5.375, 5., 272., 157.5]])
scores_tensor = constant_op.constant([0.84, 0.7944, 0.7715])
max_output_size = 100
iou_threshold = 0.5
score_threshold = 0.3
soft_nms_sigma = 0.25
pad_to_max_output_size = False
# gen_image_ops.non_max_suppression_v5.
for dtype in [np.float16, np.float32]:
boxes = math_ops.cast(boxes_tensor, dtype=dtype)
scores = math_ops.cast(scores_tensor, dtype=dtype)
_, _, num_selected = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma, pad_to_max_output_size)
self.assertEqual(num_selected.numpy(), 1)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression v2, v3, v4.
# gen_image_ops.non_max_suppression_v2:
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
# XLA currently requires dtypes to be equal.
if input_dtype == threshold_dtype or not test_util.is_xla_enabled():
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
score_threshold = constant_op.constant(
score_threshold_np, dtype=threshold_dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
score_threshold = constant_op.constant(
score_threshold_np, dtype=threshold_dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
def testZeroIOUThreshold(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [1., 1., 1., 1., 1., 1.]
max_output_size_np = 3
iou_threshold_np = 0.0
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [0, 3, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 0.5
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold):
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
yp, nvp = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
y, n = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(yp.shape.is_fully_defined(), True)
self.assertEqual(y.shape.is_fully_defined(), False)
return yp, nvp, y, n
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(self.evaluate(num_valid_padded), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(self.evaluate(num_valid), 3)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
score_threshold = constant_op.constant(score_threshold)
y, nv = image_ops.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold, score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(y.shape.is_fully_defined(), False)
return y, nv
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
selected_indices, num_valid = func(boxes_np, scores_np,
max_output_size_np, iou_threshold_np,
score_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(self.evaluate(num_valid), 3)
def testInvalidDtype(self):
boxes_np = [[4.0, 6.0, 3.0, 6.0],
[2.0, 1.0, 5.0, 4.0],
[9.0, 0.0, 9.0, 9.0]]
scores = [5.0, 6.0, 5.0]
max_output_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError), "type int64 that does not match type int32"):
boxes = constant_op.constant(boxes_np)
image_ops.non_max_suppression_padded(boxes, scores, max_output_size)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices, [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(
img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
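    # Added worked example (not from the original test): with max_value = 1.0
    # the first term vanishes, so PSNR reduces to -10 * log10(mse). For
    # instance, mse = 1e-3 gives 20 * log10(1.0) - 10 * log10(1e-3) = 30 dB,
    # and mse -> 0 drives the score toward +inf, which testInfinity below
    # relies on when comparing an image against itself.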
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1"))
tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2"))
tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3"))
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session():
self.assertAllClose(
self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBatchNumpyInputs(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
with self.cached_session():
img1 = self.evaluate(constant_op.constant(img1))
img2 = self.evaluate(constant_op.constant(img2))
ssim = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertLess(self.evaluate(ssim), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim_multiscale(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
@def_function.function
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testUnweightedIsDifferentiableEager(self):
if not context.executing_eagerly():
self.skipTest("Eager mode only")
img = self._LoadTestImages()
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
    If any of the values is negative, so that the geometric mean is not
    well-defined, the MS-SSIM score is treated as zero.
"""
with self.cached_session() as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session():
_ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def disabled_testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
@test_util.run_all_in_graph_and_eager_modes
class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
_FORWARD_COMPATIBILITY_HORIZONS = [
(2020, 1, 1),
(2020, 7, 14),
(2525, 1, 1), # future behavior
]
def testBmpChannels(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with test_util.use_gpu():
base = "tensorflow/core/lib/bmp/testdata"
          # `rgba_small.bmp` has 4 channels with transparent pixels.
# Test consistency between `decode_image` and `decode_bmp` functions.
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
image0 = image_ops.decode_image(bmp0, channels=4)
image1 = image_ops.decode_bmp(bmp0, channels=4)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
          # Test that 3 channels are returned when the user requests
          # `channels=3` even though the image has 4 channels.
          # Note that this operation simply drops the 4th channel information;
          # this is the same behavior as `decode_png`.
          # e.g. pixel values [25, 25, 25, 100] become [25, 25, 25].
bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
image2 = image_ops.decode_bmp(bmp0, channels=3)
image3 = image_ops.decode_bmp(bmp1)
image2, image3 = self.evaluate([image2, image3])
self.assertAllEqual(image2, image3)
          # Test that 4 channels are returned when the user requests
          # `channels=4` even though the image has 3 channels. The alpha
          # channel should be set to UINT8_MAX.
bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
image4 = image_ops.decode_bmp(bmp3, channels=4)
image5 = image_ops.decode_bmp(bmp4)
image4, image5 = self.evaluate([image4, image5])
self.assertAllEqual(image4, image5)
          # Test that 3 channels are returned when the user requests
          # `channels=3` even though the image has 1 channel (grayscale).
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
bmp7 = io_ops.read_file(
os.path.join(base, "grayscale_small_3channels.bmp"))
image6 = image_ops.decode_bmp(bmp6, channels=3)
image7 = image_ops.decode_bmp(bmp7)
image6, image7 = self.evaluate([image6, image7])
self.assertAllEqual(image6, image7)
          # Test that 4 channels are returned when the user requests
          # `channels=4` even though the image has 1 channel (grayscale). The
          # alpha channel should be set to UINT8_MAX.
bmp9 = io_ops.read_file(
os.path.join(base, "grayscale_small_4channels.bmp"))
image8 = image_ops.decode_bmp(bmp6, channels=4)
image9 = image_ops.decode_bmp(bmp9)
image8, image9 = self.evaluate([image8, image9])
self.assertAllEqual(image8, image9)
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
          # A NumPy uint16 input should also round-trip unchanged through
          # encode_png and decode_image.
x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
x_str = image_ops_impl.encode_png(x)
x_dec = image_ops_impl.decode_image(
x_str, channels=3, dtype=dtypes.uint16)
self.assertAllEqual(x, x_dec)
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
# Test `expand_animations=False` case.
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
          # decode_image with `expand_animations=False` returns a 3-D tensor
          # holding only the first frame; decode_gif returns every frame as a
          # 4-D tensor.
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertLen(image0.shape, 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
# Test `expand_animations=True` case.
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
image2, image3 = self.evaluate([image2, image3])
self.assertLen(image2.shape, 4)
self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
self.assertAllEqual(image2, image3)
def testImageCropAndResize(self):
if test_util.is_gpu_available():
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
else:
message = "Boxes contains at least one element that is not finite"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
message):
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
def testImageCropAndResizeWithInvalidInput(self):
with self.session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = image_ops_impl.crop_and_resize_v2(
image=np.ones((1, 1, 1, 1)),
boxes=np.ones((11, 4)),
box_indices=np.ones((11)),
crop_size=[2065374891, 1145309325])
self.evaluate(op)
@parameterized.named_parameters(
("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
)
def testWrongOpBmp(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_bmp(img_bytes)
self.evaluate(img)
@parameterized.named_parameters(
("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
("_png", image_ops.decode_png, "DecodePng"),
("_gif", image_ops.decode_gif, "DecodeGif"),
)
def testWrongOp(self, decode_op, op_used):
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
"or `decode_image` instead. Op used: ") + op_used
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img = decode_op(bmp0)
self.evaluate(img)
@parameterized.named_parameters(
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
("_bmp", "BMP", "rgba_small.bmp"),
)
def testWrongOpJpeg(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
"detected ") + img_format
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
self.evaluate(img)
def testGifFramesWithDiffSize(self):
"""Test decoding an animated GIF.
This test verifies that `decode_image` op can decode animated GIFs whose
first frame does not fill the canvas. The unoccupied areas should be filled
with zeros (black).
`squares.gif` is animated with two images of different sizes. It
alternates between a smaller image of size 10 x 10 and a larger image of
size 16 x 16. Because it starts animating with the smaller image, the first
frame does not fill the canvas. (Canvas size is equal to max frame width x
max frame height.)
`red_black.gif` has just a single image in a GIF format. It is the same
image as the smaller image (size 10 x 10) of the two images in
`squares.gif`. The only difference is that its background (canvas - smaller
    image) is pre-filled with zeros (black); it is the ground truth.
"""
base = "tensorflow/core/lib/gif/testdata"
gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
expand_animations=False)
gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
image1_0 = array_ops.gather(image1, 0)
image0, image1_0 = self.evaluate([image0, image1_0])
self.assertAllEqual(image0, image1_0)
if __name__ == "__main__":
googletest.main()
| 39.595242
| 80
| 0.633928
|
d66133add19e25fccf06a37c842504eabe306ff1
| 1,137
|
py
|
Python
|
sdk/python/pulumi_azure_native/securityandcompliance/v20210111/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/securityandcompliance/v20210111/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/securityandcompliance/v20210111/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'Kind',
'ManagedServiceIdentityType',
'PrivateEndpointServiceConnectionStatus',
'PublicNetworkAccess',
]
class Kind(str, Enum):
"""
The kind of the service.
"""
FHIR = "fhir"
FHIR_STU3 = "fhir-Stu3"
FHIR_R4 = "fhir-R4"
class ManagedServiceIdentityType(str, Enum):
"""
Type of identity being specified, currently SystemAssigned and None are allowed.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
NONE = "None"
class PrivateEndpointServiceConnectionStatus(str, Enum):
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
class PublicNetworkAccess(str, Enum):
"""
Control permission for data plane traffic coming from public networks while private endpoint is enabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
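# Usage sketch (added illustration, not part of the generated SDK): because
# these enums subclass `str`, members compare equal to their literal values
# and can be passed wherever the API expects a plain string, e.g.
# Kind.FHIR_R4 == "fhir-R4" is True and PublicNetworkAccess.ENABLED.value
# yields "Enabled".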
| 24.191489
| 108
| 0.677221
|
91ea91af6f5b97d82d76bd72f14e8e047a93e113
| 9,429
|
py
|
Python
|
YorForger/modules/ImageEditor/edit_1.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
YorForger/modules/ImageEditor/edit_1.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
YorForger/modules/ImageEditor/edit_1.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
# By @TroJanzHEX
import os
import shutil
import cv2
from PIL import Image, ImageEnhance, ImageFilter
async def bright(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "brightness.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
image = Image.open(a)
brightness = ImageEnhance.Brightness(image)
brightness.enhance(1.5).save(edit_img_loc)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("bright-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
async def mix(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "mix.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
image = Image.open(a)
red, green, blue = image.split()
new_image = Image.merge("RGB", (green, red, blue))
new_image.save(edit_img_loc)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("mix-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
async def black_white(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "black_white.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
image_file = cv2.imread(a)
grayImage = cv2.cvtColor(image_file, cv2.COLOR_BGR2GRAY)
cv2.imwrite(edit_img_loc, grayImage)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("black_white-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
async def normal_blur(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "BlurImage.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
OriImage = Image.open(a)
blurImage = OriImage.filter(ImageFilter.BLUR)
blurImage.save(edit_img_loc)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("normal_blur-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
async def g_blur(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "gaussian_blur.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
im1 = Image.open(a)
im2 = im1.filter(ImageFilter.GaussianBlur(radius=5))
im2.save(edit_img_loc)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("g_blur-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
async def box_blur(client, message):
try:
userid = str(message.chat.id)
if not os.path.isdir(f"./DOWNLOADS/{userid}"):
os.makedirs(f"./DOWNLOADS/{userid}")
download_location = "./DOWNLOADS" + "/" + userid + "/" + userid + ".jpg"
edit_img_loc = "./DOWNLOADS" + "/" + userid + "/" + "box_blur.jpg"
if not message.reply_to_message.empty:
msg = await message.reply_to_message.reply_text(
"Downloading image", quote=True
)
a = await client.download_media(
message=message.reply_to_message, file_name=download_location
)
await msg.edit("Processing Image...")
im1 = Image.open(a)
            im2 = im1.filter(ImageFilter.BoxBlur(5))  # a radius of 0 would leave the image unchanged
im2.save(edit_img_loc)
await message.reply_chat_action("upload_photo")
await message.reply_to_message.reply_photo(edit_img_loc, quote=True)
await msg.delete()
else:
await message.reply_text("Why did you delete that??")
try:
shutil.rmtree(f"./DOWNLOADS/{userid}")
except Exception:
pass
except Exception as e:
print("box_blur-error - " + str(e))
if "USER_IS_BLOCKED" in str(e):
return
else:
try:
await message.reply_to_message.reply_text(
"Something went wrong!", quote=True
)
except Exception:
return
| 38.020161
| 80
| 0.552232
|
4b9b9a944dd1cd337f0f278193c970003ac2818b
| 5,594
|
py
|
Python
|
juneberry/plotting.py
|
sei-nmvanhoudnos/Juneberry
|
a4824bc74180134a9ef5326addbc83110177102c
|
[
"MIT"
] | null | null | null |
juneberry/plotting.py
|
sei-nmvanhoudnos/Juneberry
|
a4824bc74180134a9ef5326addbc83110177102c
|
[
"MIT"
] | null | null | null |
juneberry/plotting.py
|
sei-nmvanhoudnos/Juneberry
|
a4824bc74180134a9ef5326addbc83110177102c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
A set of plotting utilities.
"""
# ==========================================================================================================================================================
# Copyright 2021 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS"
# BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER
# INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED
# FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM
# FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD (SEI)-style license, please see license.txt
# or contact permission@sei.cmu.edu for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see
# Copyright notice for non-US Government use and distribution.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Pytorch (https://github.com/pytorch/pytorch/blob/master/LICENSE) Copyright 2016 facebook, inc..
# 2. NumPY (https://github.com/numpy/numpy/blob/master/LICENSE.txt) Copyright 2020 Numpy developers.
# 3. Matplotlib (https://matplotlib.org/3.1.1/users/license.html) Copyright 2013 Matplotlib Development Team.
# 4. pillow (https://github.com/python-pillow/Pillow/blob/master/LICENSE) Copyright 2020 Alex Clark and contributors.
# 5. SKlearn (https://github.com/scikit-learn/sklearn-docbuilder/blob/master/LICENSE) Copyright 2013 scikit-learn
# developers.
# 6. torchsummary (https://github.com/TylerYep/torch-summary/blob/master/LICENSE) Copyright 2020 Tyler Yep.
# 7. adversarial robust toolbox (https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/main/LICENSE)
# Copyright 2018 the adversarial robustness toolbox authors.
# 8. pytest (https://docs.pytest.org/en/stable/license.html) Copyright 2020 Holger Krekel and others.
# 9. pylint (https://github.com/PyCQA/pylint/blob/master/COPYING) Copyright 1991 Free Software Foundation, Inc..
# 10. python (https://docs.python.org/3/license.html#psf-license) Copyright 2001 python software foundation.
#
# DM20-1149
#
# ==========================================================================================================================================================
import json
import matplotlib.pyplot as plt
def plot_means_stds_layers(title, means, stds, output_filename) -> None:
"""
Generates a png plot to the specified file name that contains the means as a line
and the standard deviations as error bars.
:param title: The title for the plot.
:param means: The means to plot.
:param stds: The standard deviations.
:param output_filename: The file in which to place the output.
"""
plot_values_errors(title, means, stds, "Layers", "Means", output_filename)
def plot_values_errors(title, values, errors, x_label, y_label, output_name) -> None:
"""
Generates a plot to the specified file name that contains the values as a line
and the error values as error bars.
:param title: The title for the plot.
    :param values: The values to plot.
    :param errors: The error values to draw as error bars.
:param x_label: Label for the x-axis
:param y_label: Label for the y-axis
:param output_name: The file in which to place the output.
"""
layers = list(range(len(values)))
plt.plot(layers, values, linestyle='-', marker='o')
plt.errorbar(layers, values, errors, fmt='ok', lw=3)
plt.title(f"{y_label} across {x_label} of {title}")
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.savefig(output_name)
plt.close()
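# A minimal usage sketch with made-up statistics for a five-layer model; the output
# file name below is illustrative only.
def _demo_plot_layer_stats():
    demo_means = [0.10, 0.32, 0.25, 0.48, 0.41]
    demo_stds = [0.02, 0.05, 0.03, 0.06, 0.04]
    plot_means_stds_layers("demo model", demo_means, demo_stds, "layer_stats_demo.png")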
def plot_training_summary_chart(model_manager) -> None:
"""
Plots the accuracies and losses from the training output into an image.
:param model_manager: Model manager object that determines which model to process.
"""
with open(model_manager.get_training_out_file()) as json_file:
data = json.load(json_file)
results = data['trainingResults']
epochs = range(1, len(results['accuracy']) + 1)
fig, ax1 = plt.subplots()
plt.ylim(0.0, 1.0)
ax1.set_xlabel('Epoch')
# ================= Accuracy
color = 'tab:red'
ax1.set_ylabel('Accuracy', color=color)
ax1.plot(epochs, results['accuracy'], linestyle='-', marker='', color=color, label="Accuracy")
ax1.plot(epochs, results['valAccuracy'], linestyle='--', marker='', color=color, label="Validation Accuracy")
ax1.tick_params(axis='y', labelcolor=color)
ax1.legend(loc="upper center", bbox_to_anchor=(0.5, -0.15), ncol=2)
# ================= Loss
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Loss', color=color)
ax2.plot(epochs, results['loss'], linestyle='-', marker='', color=color, label="Loss", )
ax2.plot(epochs, results['valLoss'], linestyle='--', marker='', color=color, label="Validation Loss")
ax2.tick_params(axis='y', labelcolor=color)
ax2.legend(loc="upper center", bbox_to_anchor=(0.5, -0.25), ncol=2)
# ================= General
plt.title(f'Training results: {model_manager.model_name}')
# otherwise the right y-label is slightly clipped
fig.tight_layout()
# Save to disk
plt.savefig(model_manager.get_training_summary_plot())
| 47.008403
| 156
| 0.674115
|
67a54b7df56925dc131a5c214924fa2f5900846f
| 947
|
py
|
Python
|
openauth/migrations/0001_initial.py
|
daimon99/django-openauth
|
8b28fd70eb4a15190606894e8c2f2167ffdddb69
|
[
"Apache-2.0"
] | null | null | null |
openauth/migrations/0001_initial.py
|
daimon99/django-openauth
|
8b28fd70eb4a15190606894e8c2f2167ffdddb69
|
[
"Apache-2.0"
] | null | null | null |
openauth/migrations/0001_initial.py
|
daimon99/django-openauth
|
8b28fd70eb4a15190606894e8c2f2167ffdddb69
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-11-29 04:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('provider', models.CharField(max_length=128)),
('uid', models.CharField(max_length=256)),
('extra', models.TextField(blank=True, null=True)),
('created', models.DateTimeField()),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.655172
| 142
| 0.620908
|
dd9f35cd22ee07b7b54d1b7c11f629c1c8402d61
| 430
|
py
|
Python
|
opening pic.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
opening pic.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
opening pic.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
import os
from PIL import Image
img = Image.open(os.path.join(os.path.dirname(__file__), '1.png')).show()
os.startfile(os.path.join(os.path.dirname(__file__), '1.png'))
img = Image.open(os.path.join(os.path.dirname(__file__), '1.png')).convert('L')
img.show()
img.save('2.jpg')
img = Image.open(os.path.join(os.path.dirname(__file__), '1.png'))#.convert('L')
new_img = img.resize((256,256))
new_img.save('2-256x256.png', 'png')
| 33.076923
| 80
| 0.693023
|
3e9929e3129addcfbcaefd34517b8aee3bd0c1dd
| 4,531
|
py
|
Python
|
input/test_beam_g10_l200.py
|
jsdomine/cosyr
|
a612b2a642c9e288975efbfdab5f1a26f2aaeeeb
|
[
"BSD-3-Clause"
] | null | null | null |
input/test_beam_g10_l200.py
|
jsdomine/cosyr
|
a612b2a642c9e288975efbfdab5f1a26f2aaeeeb
|
[
"BSD-3-Clause"
] | null | null | null |
input/test_beam_g10_l200.py
|
jsdomine/cosyr
|
a612b2a642c9e288975efbfdab5f1a26f2aaeeeb
|
[
"BSD-3-Clause"
] | null | null | null |
# -----------------------------------------
# - Input deck for realistic beam size -
# -----------------------------------------
import numpy as np
from input.utils import *
from input.misc import *
####################### Preprocessing ##########################
run_name = "test_beam_g10_l200"
## electron and trajectory
gamma=10
lbeam = 200 #3000 # beam length, in um
dbeam = 200 #50 # beam radius, in um
psi_max = 0.1 #0.42 # max retarded
## common mesh
box_beam_ratio = 2.0 # mesh size / beam size
scaled_alpha = lbeam*1e-6*gamma**3.0 * box_beam_ratio # scaled alpha range of mesh
scaled_chi = dbeam*1e-6*gamma**2.0 * box_beam_ratio # scaled chi range of mesh
if (mpi_rank==0) : print("scaled_alpha={}, scaled_chi={}".format(scaled_alpha, scaled_chi))
npt_alpha = 401 #1001 # number of mesh points along alpha
npt_chi = 401 #101 # number of mesh points along chi
if (mpi_rank==0) : print("npt_alpha={}, npt_chi={}".format(npt_alpha, npt_chi))
####################### Main setup ##########################
## wavelet emission
num_wavefronts = 400 # number of wavefronts
num_dirs = 400 # number of field lines
num_step = num_wavefronts # number of steps (currently always equal to num_wavefronts)
dt = psi_max/num_wavefronts # time step in electron rest frame
emission_interval = num_step-1 # only emit wavefronts at simulation end (test purpose)
## remap
remap_interval = num_step-1 # interval of doing remapping (in time steps)
remap_scatter = False # use scatter weights form for remap
remap_adaptive = False # use adaptive smoothing length for remap
remap_scaling[0] = 1.0 # support/smoothing length scaling factor
remap_scaling[1] = 1.0 # support/smoothing length scaling factor
remap_verbose = False # print remap statistics
# electron beam
beam_charge = 0.01 # nC
num_particles = 1*5 # number of particles
trajectory_type = 2 # 1: straight line, 2: circular, 3: sinusoidal
parameters[0] = gamma # central energy for all types
parameters[1] = 100.0 # propagation angle for type 1, radius (cm) for type 2, frequency for type 3
beam = init_beam(num_particles, gamma, lbeam, dbeam, mpi_rank)
#beam = generate_microbunches(overall_beam_env='gaussian', _npart=num_particles,
# _nbunches=1, _sgmx_sub_div=6.0, _lbeam=lbeam, _dbeam=dbeam, _mpi_rank=mpi_rank)
# del beam # init a single particle instead
## comoving mesh
num_gridpt_hor = npt_alpha # number of points in x-axis
num_gridpt_ver = npt_chi # number of points in y-axis
mesh_span_angle = scaled_alpha/gamma**3 # in radians
mesh_width = scaled_chi/gamma**2 # in unit of radius
# load wavelets
cosyr_root = '..'
path2subcycling = cosyr_root + "/input/wavelets/g10-200x200um-sub"
wavelet_x, wavelet_y, wavelet_field = load_wavelets(path2subcycling, fld_file="EsRad_sub.csv", unscale_coord=True, _gamma = gamma)
if (mpi_rank==0) :
print("wavelet shape =", wavelet_x.shape, wavelet_y.shape, wavelet_field.shape)
print("wavelet field 0 min/max =", wavelet_field.min(), wavelet_field.max())
num_wavelet_fields = 1
min_emit_angle = 0.0
# 0: use global (x,y) coordinate;
# 1: use local (x',y') coordinate;
# 2: (TODO) use local cylindrical coordinate
wavelet_type = 1
if (wavelet_type == 0):
rotation_angle = 0.1
wavelet_x, wavelet_y = convert2global(wavelet_x, wavelet_y, rotation_angle)
# True: loaded wavelets will be repeatedly emitted at each step and copied into internal wavelets array,
# otherwise only used when interpolation is done and not copied into internal wavelets array
use_wavelet_for_subcycle = True
num_wavelet_fields = 1
####################### Diagnostics ##########################
print_interval = 100 # interval for printing simulation steps
beam_output_interval = num_step - 1
mesh_output_interval = num_step - 1
wavelet_output_interval = num_step - 1
beam_output = True
if (mpi_rank==0):
make_output_dirs(run_name+"/beam", num_step, beam_output_start, beam_output_interval)
mesh_output = True
make_output_dirs(run_name+"/mesh", num_step, mesh_output_start, mesh_output_interval)
wavelet_output = True
make_output_dirs(run_name+"/wavelet", num_step, wavelet_output_start, wavelet_output_interval)
make_output_dirs(run_name+"/traj", num_step, wavelet_output_start, wavelet_output_interval)
| 43.990291
| 130
| 0.672258
|
e993a485c52dcd593de79eea8bf4e1f61babf585
| 17,613
|
py
|
Python
|
train.py
|
NiklasMWeber/CreditCycleForecasting
|
d50c799a33425a38853d36d61b3f6c3cd0a967d3
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
NiklasMWeber/CreditCycleForecasting
|
d50c799a33425a38853d36d61b3f6c3cd0a967d3
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
NiklasMWeber/CreditCycleForecasting
|
d50c799a33425a38853d36d61b3f6c3cd0a967d3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 4 00:03:12 2022
@author: nikth
"""
import numpy as np
import math
import iisignature as ii
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.preprocessing import StandardScaler, MaxAbsScaler
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
def get_sigX(X,m):
if m == 0:
return np.full((np.shape(X)[0], 1), 1)
else:
d = X.shape[2]
sigX = np.zeros((np.shape(X)[0], ii.siglength(d, m) + 1))
sigX[:, 0] = 1
for i in range(np.shape(X)[0]):
sigX[i, 1:] = ii.sig(X[i, :, :], m)
return sigX
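# A minimal sketch of the feature map on synthetic data: 20 piecewise-linear paths in
# R^2 with 50 points each, truncated at order 3; the result keeps a leading constant-1
# column followed by the ii.siglength(d, m) signature coordinates (all values made up).
def _demo_sigX(n_paths=20, n_points=50, d=2, m=3):
    demo_X = np.random.randn(n_paths, n_points, d).cumsum(axis=1)
    return get_sigX(demo_X, m)  # shape (n_paths, ii.siglength(d, m) + 1)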
def getKpen(X,Y,max_Kpen,rho = 0.25,alpha=None,normalizeFeatures = True, plotTrue = False ):
'''
    - Finds K_pen following Birge and Massart,
- alpha by Cross-validation during regression on order 1 Signature (-->
For this reason it will be a good idea to normalize signature entries)
    - and returns the scaler to make it available for later prediction.
Parameters
----------
X : TYPE
DESCRIPTION.
Y : TYPE
DESCRIPTION.
max_Kpen : TYPE
DESCRIPTION.
rho : TYPE, optional
        DESCRIPTION. The default is 0.25.
alpha : TYPE, optional
DESCRIPTION. The default is None.
normalizeFeatures : TYPE, optional
DESCRIPTION. The default is True.
plotTrue : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
KpenVal : TYPE
DESCRIPTION.
alpha : TYPE
DESCRIPTION.
Scaler : StandardScaler
Used to normalize data
'''
dimPath = len(X[0][0])
nPaths = len(X)
m_max = 1
while ii.siglength(dimPath, m_max+1) < nPaths: m_max += 1
if plotTrue == True:
print('m_Max is '+ str(m_max))
Kpen = np.concatenate(( np.array([1e-6, 1e-5,1e-4,1e-3,1e-2,1e-1]) ,np.linspace(1,max_Kpen,max_Kpen)))
penList = []
losses = []
scalers = []
scaler = None
for m in range(1,m_max+1):
sigX = get_sigX(X,m)
if normalizeFeatures == True:
scaler = StandardScaler()
scaler.fit(sigX)
scalers.append(scaler)
sigX = scaler.transform(sigX)
if alpha is None: #set alpha by cross-validation in the first iteration of loop
alphas=np.linspace(10 ** (-6), 100, num=1000)
reg_cv = RidgeCV(alphas=alphas, store_cv_values=True, fit_intercept=False, gcv_mode='svd')
reg_cv.fit(sigX, Y)
alpha = reg_cv.alpha_
reg = Ridge(alpha = alpha, fit_intercept=False)
reg.fit(sigX,Y)
predict_train = reg.predict(sigX)
pen = Kpen.reshape((1,len(Kpen)))/(nPaths**rho)*math.sqrt(ii.siglength(dimPath,m))
penList.append(pen)
#squareLoss = sum((Y_test-predict_test)**2)
squareLoss = sum((Y-predict_train)**2)/len(Y)
losses.append(squareLoss)
# The following part tries to find the first bigger jump (Birge, Massart)
LossKpenMatrix = np.array(losses).reshape((len(losses),1))+np.array(penList).reshape((len(losses),len(Kpen)))
mHat = np.argmin(LossKpenMatrix, axis=0)+1
if plotTrue == True:
plt.figure()
plt.plot(np.linspace(1,len(Kpen), num = len(Kpen)),mHat)
jumps = -mHat[1:] + mHat[:-1]
quantile = np.quantile(jumps, 0.25)
tmp = np.where(jumps>=max(1,quantile))
try:
#tmp2 = tmp[0]
KpenVal = 2*(Kpen[min(tmp[0])+1])
except:
KpenVal = 2*Kpen[0]
print("Warning: No jumps for Kpen extraction found")
return KpenVal
def getmHat(X,Y, Kpen,rho = 0.25,m_max = None,alpha=None,normalizeFeatures = True, plotTrue = False, mHatInput= None ):
mHat = 1
dimPath = len(X[0][0])
nPaths = len(X)
if m_max == None:
m_max = 1
while ii.siglength(dimPath, m_max+1) < nPaths*10: m_max += 1
if plotTrue == True:
print('m_max is '+ str(m_max))
losses = []
penalizedLosses = []
scalers = []
regs = []
scaler = None
for m in range(1,m_max+1):
sigX = get_sigX(X,m)
if normalizeFeatures == True:
scaler = StandardScaler()
scaler.fit(sigX)
scalers.append(scaler)
sigX = scaler.transform(sigX)
if alpha is None: #select alpha by cross-validation in the first iteration of loop
alphas=np.linspace(10 ** (-6), 100, num=1000)
reg_cv = RidgeCV(alphas=alphas, store_cv_values=True, fit_intercept=False, gcv_mode='svd')
reg_cv.fit(sigX, Y)
alpha = reg_cv.alpha_
reg = Ridge(alpha = alpha, fit_intercept=False)
reg.fit(sigX,Y)
predict_train = reg.predict(sigX)
regs.append(reg)
pen = Kpen/(nPaths**rho)*math.sqrt(ii.siglength(dimPath,m))
#squareLoss = sum((Y_test-predict_test)**2)
squareLoss = sum((Y-predict_train)**2)/len(Y)
losses.append(squareLoss)
penalizedLosses.append(squareLoss + pen)
mHat = np.argmin(penalizedLosses) +1
if plotTrue:
base = np.linspace(1,m_max,num = m_max)
plt.figure()
plt.plot(base,penalizedLosses)
if mHatInput == None:
return mHat, regs[mHat-1], scalers[mHat-1]
else:
mHatInput = min(mHatInput,m_max)
return mHatInput, regs[mHatInput-1], scalers[mHatInput-1]
def select_hatm_cv(X, Y, max_k=None, scaling=False, plot=False):
"""Select the optimal value of hatm for the signature linear model implemented in the class SignatureRegression by
cross validation.
Parameters
----------
X: array, shape (n,n_points,d)
Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
linear paths, each composed of n_points.
Y: array, shape (n)
Array of target values.
max_k: int,
Maximal value of signature truncation to keep the number of features below max_features.
scaling: boolean, default=False
Whether to scale the predictor matrix to have zero mean and unit variance
plot: boolean, default=False
If true, plot the cross validation loss as a function of the truncation order.
Returns
-------
hatm: int
Optimal value of hatm.
"""
d = X.shape[2]
max_features = 10 ** 4
if max_k is None:
max_k = math.floor((math.log(max_features * (d - 1) + 1) / math.log(d)) - 1)
score = []
sigXmax = get_sigX(X,max_k)
for k in range(max_k+1):
if k == 0:
siglength = 0 #this is length without level 0 one!
else:
siglength = ii.siglength(d,k)
sigX = sigXmax[:,0:siglength+1]
kf = KFold(n_splits=5)
score_i = []
for train, test in kf.split(X):
reg = SignatureRegressionNik(k, normalizeFeatures=scaling)
reg.fit_fromSig(sigX[train], Y[train])
score_i += [reg.get_loss_fromSig(sigX[test], Y[test])]
score += [np.mean(score_i)]
if plot:
plt.plot(np.arange(max_k+1), score)
plt.show()
return np.argmin(score)
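# A minimal end-to-end sketch tying the pieces together: pick the truncation order by
# cross validation, then fit the ridge-on-signatures model at that order. The class is
# defined further down in this module and is only resolved at call time.
def _demo_select_and_fit(X, Y):
    hatm = select_hatm_cv(X, Y, scaling=True)
    reg = SignatureRegressionNik(hatm, normalizeFeatures=True)
    reg.fit(X, Y)
    return hatm, reg.score(X, Y)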
# class SignatureRegression():
# """ Signature regression class
# Parameters
# ----------
# m: int
# Truncation order of the signature
# scaling: boolean, default=True
# Whether to scale the predictor matrix to have zero mean and unit variance
# alpha: float, default=None
# Regularization parameter in the Ridge regression
# Attributes
# ----------
# reg: object
# Instance of sklearn.linear_model.Ridge
# scaler: object
# Instance of sklearn.preprocessing.StandardScaler
# """
# def __init__(self, m, scaling=False, alpha=None):
# self.scaling = scaling
# self.reg = Ridge(normalize=False, fit_intercept=False, solver='svd')
# self.m = m
# self.alpha = alpha
# if self.scaling:
# self.scaler = StandardScaler()
# def fit(self, X, Y, alphas=np.linspace(10 ** (-6), 100, num=1000)):
# """Fit a signature ridge regression.
# Parameters
# ----------
# X: array, shape (n,n_points,d)
# Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
# linear paths, each composed of n_points.
# Y: array, shape (n)
# Array of target values.
# alphas: array, default=np.linspace(10 ** (-6), 100, num=1000)
# Grid for the cross validation search of the regularization parameter in the Ridge regression.
# Returns
# -------
# reg: object
# Instance of sklearn.linear_model.Ridge
# """
# sigX = get_sigX(X, self.m)
# if self.scaling:
# self.scaler.fit(sigX)
# sigX = self.scaler.transform(sigX)
# if self.alpha is not None:
# self.reg.alpha_ = self.alpha
# else:
# reg_cv = RidgeCV(alphas=alphas, store_cv_values=True, fit_intercept=False, gcv_mode='svd')
# reg_cv.fit(sigX, Y)
# self.alpha = reg_cv.alpha_
# self.reg.alpha_ = self.alpha
# self.reg.fit(sigX, Y)
# return self.reg
# def predict(self, X):
# """Outputs prediction of self.reg, already trained with signatures truncated at order m.
# Parameters
# ----------
# X: array, shape (n,n_points,d)
# Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
# linear paths, each composed of n_points.
# Returns
# -------
# Ypred: array, shape (n)
# Array of predicted values.
# """
# sigX = get_sigX(X, self.m)
# if self.scaling:
# sigX = self.scaler.transform(sigX)
# Ypred = self.reg.predict(sigX)
# return Ypred
# def get_loss(self, X, Y, plot=False):
# """Computes the empirical squared loss obtained with a Ridge regression on signatures truncated at m.
# Parameters
# ----------
# X: array, shape (n,n_points,d)
# Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
# linear paths, each composed of n_points.
# Y: array, shape (n)
# Array of target values.
# plot: boolean, default=False
# If True, plots the regression coefficients and a scatter plot of the target values Y against its predicted
# values Ypred to assess the quality of the fit.
# Returns
# -------
# hatL: float
# The squared loss, that is the sum of the squares of Y-Ypred, where Ypred are the fitted values of the Ridge
# regression of Y against signatures of X truncated at m.
# """
# Ypred = self.predict(X)
# if plot:
# plt.scatter(Y, Ypred)
# plt.plot([0.9 * np.min(Y), 1.1 * np.max(Y)], [0.9 * np.min(Y), 1.1 * np.max(Y)], '--', color='black')
# plt.title("Ypred against Y")
# plt.show()
# return np.mean((Y - Ypred) ** 2)
# def score(self, X,Y): ##added by Nik
# return 1-self.get_loss(X,Y)/ np.mean((Y-np.mean(Y))**2)
class SignatureRegressionNik():
""" Signature regression class
Parameters
----------
m: int
Truncation order of the signature
    normalizeFeatures: boolean, default=False
Whether to scale the predictor matrix to have zero mean and unit variance
alpha: float, default=None
Regularization parameter in the Ridge regression
Attributes
----------
reg: object
Instance of sklearn.linear_model.Ridge
scaler: object
Instance of sklearn.preprocessing.StandardScaler
"""
def __init__(self, m, normalizeFeatures=False, alpha=None):
self.normalizeFeatures = normalizeFeatures
self.reg = Ridge(normalize=False, fit_intercept=False, solver='svd')
self.m = m
self.alpha = alpha
if self.normalizeFeatures:
self.scaler = StandardScaler()
def fit(self, X, Y, alphas=np.linspace(10 ** (-6), 100, num=1000)):
"""Fit a signature ridge regression.
Parameters
----------
X: array, shape (n,n_points,d)
Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
linear paths, each composed of n_points.
Y: array, shape (n)
Array of target values.
alphas: array, default=np.linspace(10 ** (-6), 100, num=1000)
Grid for the cross validation search of the regularization parameter in the Ridge regression.
Returns
-------
reg: object
Instance of sklearn.linear_model.Ridge
"""
sigX = get_sigX(X,self.m)
self.sigX = sigX
if self.normalizeFeatures:
self.scaler.fit(sigX)
sigX = self.scaler.transform(sigX)
if self.alpha is None: #select alpha by cross-validation
alphas=np.linspace(10 ** (-6), 100, num=1000)
self.reg_cv = RidgeCV(alphas=alphas, store_cv_values=True, fit_intercept=False, gcv_mode='svd')
self.reg_cv.fit(sigX, Y)
self.alpha = self.reg_cv.alpha_
self.reg = Ridge(alpha = self.alpha, fit_intercept=False)
self.reg.fit(sigX,Y)
return self.reg
def predict(self, X):
"""Outputs prediction of self.reg, already trained with signatures truncated at order m.
Parameters
----------
X: array, shape (n,n_points,d)
Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
linear paths, each composed of n_points.
Returns
-------
Ypred: array, shape (n)
Array of predicted values.
"""
sigX = get_sigX(X, self.m)
if self.normalizeFeatures:
sigX = self.scaler.transform(sigX)
Ypred = self.reg.predict(sigX)
return Ypred
def get_loss(self, X, Y, plot=False):
"""Computes the empirical squared loss obtained with a Ridge regression on signatures truncated at m.
Parameters
----------
X: array, shape (n,n_points,d)
Array of training paths. It is a 3-dimensional array, containing the coordinates in R^d of n piecewise
linear paths, each composed of n_points.
Y: array, shape (n)
Array of target values.
plot: boolean, default=False
If True, plots the regression coefficients and a scatter plot of the target values Y against its predicted
values Ypred to assess the quality of the fit.
Returns
-------
hatL: float
The squared loss, that is the sum of the squares of Y-Ypred, where Ypred are the fitted values of the Ridge
regression of Y against signatures of X truncated at m.
"""
Ypred = self.predict(X)
if plot:
plt.scatter(Y, Ypred)
plt.plot([0.9 * np.min(Y), 1.1 * np.max(Y)], [0.9 * np.min(Y), 1.1 * np.max(Y)], '--', color='black')
plt.title("Ypred against Y")
plt.show()
return np.mean((Y - Ypred) ** 2)
def score(self, X,Y):
return 1-self.get_loss(X,Y)/ np.mean((Y-np.mean(Y))**2)
def fit_fromSig(self, sigX, Y, alphas=np.linspace(10 ** (-6), 100, num=1000)):
if self.normalizeFeatures:
self.scaler.fit(sigX)
sigX = self.scaler.transform(sigX)
if self.alpha is None: #select alpha by cross-validation
self.reg_cv = RidgeCV(alphas=alphas, store_cv_values=True, fit_intercept=False, gcv_mode='svd')
self.reg_cv.fit(sigX, Y)
self.alpha = self.reg_cv.alpha_
self.reg = Ridge(alpha = self.alpha, fit_intercept=False)
self.reg.fit(sigX,Y)
return self.reg
def predict_fromSig(self, sigX):
if self.normalizeFeatures:
sigX = self.scaler.transform(sigX)
Ypred = self.reg.predict(sigX)
return Ypred
def get_loss_fromSig(self, sigX, Y, plot = False):
Ypred = self.predict_fromSig(sigX)
return np.mean((Y - Ypred) ** 2)
def score_fromSig(self, sigX, Y):
return 1-self.get_loss_fromSig(sigX,Y)/ np.mean((Y-np.mean(Y))**2)
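# A minimal fit/predict sketch on synthetic paths (all numbers below are made up):
# the toy target is the final value of the first coordinate of each path.
def _demo_signature_regression(m=3, n_paths=50, n_points=30, d=2):
    demo_X = np.random.randn(n_paths, n_points, d).cumsum(axis=1)
    demo_Y = demo_X[:, -1, 0]
    model = SignatureRegressionNik(m, normalizeFeatures=True)
    model.fit(demo_X, demo_Y)
    return model.predict(demo_X), model.score(demo_X, demo_Y)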
# if __name__ == '__main__':
# import dataGeneration as dg
# dimPath = 2
# nPaths = 10000
# mStar = 5
# G = dg.GeneratorFermanian1(dimPath,nPaths,mStar, num = 101)
# G.generatePath()
# G.generateResponse()
# #X = np.array(G.X)
# # add time:
# X = np.array([np.concatenate((G.partition01.reshape(-1,1), x),axis = 1) for x in G.X])
# Y = G.Y
# Kpen = getKpen(X,Y,max_Kpen = 2000,rho = 0.25,alpha = None,normalizeFeatures = True, plotTrue = True)
# mHat, reg,_ = getmHat(X, Y, Kpen, rho = 0.25, alpha = None, m_max = None, normalizeFeatures=True, plotTrue = True)
# print('Kpen: ', Kpen)
# print('m_hat: ', mHat)
# print('alpha: ', reg.alpha)
| 32.556377
| 121
| 0.577414
|
7ef210aa963e2352eb50840eb5084b34b9faf651
| 1,557
|
py
|
Python
|
aiida/work/__init__.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/work/__init__.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/work/__init__.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from plumpy import Bundle
from plumpy import ProcessState
from .exceptions import *
from .exit_code import *
from .futures import *
from .launch import *
from .job_processes import *
from .persistence import *
from .processes import *
from .rmq import *
from .runners import *
from .utils import *
from .workfunctions import *
from .workchain import *
from .manager import *
_local = ('ProcessState',)
__all__ = (
exceptions.__all__ +
exit_code.__all__ +
processes.__all__ +
runners.__all__ +
utils.__all__ +
workchain.__all__ +
launch.__all__ +
workfunctions.__all__ +
job_processes.__all__ +
rmq.__all__ +
futures.__all__ +
persistence.__all__ +
manager.__all__ + # TODO: To be moved later
_local)
| 33.12766
| 75
| 0.558767
|
a9a9ad917539df994ea7eb20de5bbacf775446f2
| 2,317
|
py
|
Python
|
portia_server/portia_api/jsonapi/exceptions.py
|
hackrush01/portia
|
c7414034361fecada76e1693666674c274b0421a
|
[
"BSD-3-Clause"
] | 6,390
|
2015-01-01T17:05:13.000Z
|
2022-03-31T08:20:12.000Z
|
portia_server/portia_api/jsonapi/exceptions.py
|
hackrush01/portia
|
c7414034361fecada76e1693666674c274b0421a
|
[
"BSD-3-Clause"
] | 442
|
2015-01-04T17:32:20.000Z
|
2022-03-15T21:21:23.000Z
|
portia_server/portia_api/jsonapi/exceptions.py
|
hackrush01/portia
|
c7414034361fecada76e1693666674c274b0421a
|
[
"BSD-3-Clause"
] | 1,288
|
2015-01-09T05:54:20.000Z
|
2022-03-31T03:21:51.000Z
|
from collections import OrderedDict
from uuid import uuid4
from rest_framework.exceptions import APIException, ValidationError
from rest_framework.status import (HTTP_400_BAD_REQUEST, HTTP_409_CONFLICT,
HTTP_404_NOT_FOUND)
from rest_framework.views import exception_handler
from .utils import get_status_title
class JsonApiValidationError(ValidationError):
def __init__(self, detail):
super(JsonApiValidationError, self).__init__({
'errors': [OrderedDict([
('status', self.status_code),
('title', get_status_title(self.status_code)),
('detail', error['detail']),
('source', error['source']),
]) for error in detail.get('errors', [])]
})
def render_exception(status_code, detail):
return {
'errors': [OrderedDict([
('id', str(uuid4())),
('status', status_code),
('title', get_status_title(status_code)),
('detail', detail)
])]
}
class JsonApiBadRequestError(APIException):
status_code = HTTP_400_BAD_REQUEST
default_detail = (u"The server cannot process the request due to invalid "
u"data.")
class JsonApiNotFoundError(APIException):
status_code = HTTP_404_NOT_FOUND
default_detail = u"Could not find the resource specified"
class JsonApiConflictError(APIException):
status_code = HTTP_409_CONFLICT
default_detail = u"The server cannot process the request due to a conflict."
class JsonApiFeatureNotAvailableError(JsonApiBadRequestError):
default_detail = u"This feature is not available for your project."
class JsonApiGeneralException(APIException):
def __init__(self, detail=None, status_code=None):
assert status_code is not None
self.status_code = status_code
super(JsonApiGeneralException, self).__init__(detail)
def jsonapi_exception_handler(exc, context):
accepts = context['request'].accepted_media_type or ''
if accepts.startswith('application/vnd.api+json'):
try:
exc.detail = render_exception(exc.status_code, exc.detail)
except AttributeError:
pass # Ignore django exceptions
response = exception_handler(exc, context)
return response
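# A minimal wiring sketch: rest_framework activates a custom handler through the
# EXCEPTION_HANDLER key of the REST_FRAMEWORK setting. The dotted path below assumes
# this module is importable as portia_api.jsonapi.exceptions; adjust to the project layout.
EXAMPLE_REST_FRAMEWORK_SETTING = {
    'EXCEPTION_HANDLER': 'portia_api.jsonapi.exceptions.jsonapi_exception_handler',
}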
| 32.180556
| 80
| 0.676737
|
0078e1ebeb87acd6dd9d2161a2be8538ef77ad4d
| 14,659
|
py
|
Python
|
st2client/tests/unit/test_commands.py
|
meghasfdc/st2
|
7079635e94942e7b44ae74daa6a7378a00e518d9
|
[
"Apache-2.0"
] | 1
|
2020-10-26T03:26:17.000Z
|
2020-10-26T03:26:17.000Z
|
st2client/tests/unit/test_commands.py
|
meghasfdc/st2
|
7079635e94942e7b44ae74daa6a7378a00e518d9
|
[
"Apache-2.0"
] | 1
|
2022-03-31T03:53:22.000Z
|
2022-03-31T03:53:22.000Z
|
st2client/tests/unit/test_commands.py
|
meghasfdc/st2
|
7079635e94942e7b44ae74daa6a7378a00e518d9
|
[
"Apache-2.0"
] | 1
|
2019-10-11T14:42:28.000Z
|
2019-10-11T14:42:28.000Z
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import mock
import json
import logging
import argparse
import tempfile
import unittest2
from collections import namedtuple
from tests import base
from tests.base import BaseCLITestCase
from st2client.shell import Shell
from st2client import models
from st2client.utils import httpclient
from st2client.commands import resource
from st2client.commands.resource import ResourceViewCommand
__all__ = [
'TestResourceCommand',
'ResourceViewCommandTestCase'
]
LOG = logging.getLogger(__name__)
class TestResourceCommand(unittest2.TestCase):
def __init__(self, *args, **kwargs):
super(TestResourceCommand, self).__init__(*args, **kwargs)
self.parser = argparse.ArgumentParser()
self.subparsers = self.parser.add_subparsers()
self.branch = resource.ResourceBranch(
base.FakeResource, 'Test Command', base.FakeApp(), self.subparsers)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES), 200, 'OK')))
def test_command_list(self):
args = self.parser.parse_args(['fakeresource', 'list'])
self.assertEqual(args.func, self.branch.commands['list'].run_and_print)
instances = self.branch.commands['list'].run(args)
actual = [instance.serialize() for instance in instances]
expected = json.loads(json.dumps(base.RESOURCES))
self.assertListEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_list_failed(self):
args = self.parser.parse_args(['fakeresource', 'list'])
self.assertRaises(Exception, self.branch.commands['list'].run, args)
@mock.patch.object(
models.ResourceManager, 'get_by_name',
mock.MagicMock(return_value=None))
@mock.patch.object(
models.ResourceManager, 'get_by_id',
mock.MagicMock(return_value=base.FakeResource(**base.RESOURCES[0])))
def test_command_get_by_id(self):
args = self.parser.parse_args(['fakeresource', 'get', '123'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
instance = self.branch.commands['get'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES[0]), 200, 'OK')))
def test_command_get(self):
args = self.parser.parse_args(['fakeresource', 'get', 'abc'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
instance = self.branch.commands['get'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 404, 'NOT FOUND')))
def test_command_get_404(self):
args = self.parser.parse_args(['fakeresource', 'get', 'cba'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
self.assertRaises(resource.ResourceNotFoundError,
self.branch.commands['get'].run,
args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_get_failed(self):
args = self.parser.parse_args(['fakeresource', 'get', 'cba'])
self.assertRaises(Exception, self.branch.commands['get'].run, args)
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES[0]), 200, 'OK')))
def test_command_create(self):
instance = base.FakeResource(name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(['fakeresource', 'create', path])
self.assertEqual(args.func,
self.branch.commands['create'].run_and_print)
instance = self.branch.commands['create'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_create_failed(self):
instance = base.FakeResource(name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(['fakeresource', 'create', path])
self.assertRaises(Exception,
self.branch.commands['create'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK',
{})))
@mock.patch.object(
httpclient.HTTPClient, 'put',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES[0]), 200, 'OK')))
def test_command_update(self):
instance = base.FakeResource(id='123', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertEqual(args.func,
self.branch.commands['update'].run_and_print)
instance = self.branch.commands['update'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'put',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_update_failed(self):
instance = base.FakeResource(id='123', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertRaises(Exception,
self.branch.commands['update'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
def test_command_update_id_mismatch(self):
instance = base.FakeResource(id='789', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertRaises(Exception,
self.branch.commands['update'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK',
{})))
@mock.patch.object(
httpclient.HTTPClient, 'delete',
mock.MagicMock(return_value=base.FakeResponse('', 204, 'NO CONTENT')))
def test_command_delete(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'abc'])
self.assertEqual(args.func,
self.branch.commands['delete'].run_and_print)
self.branch.commands['delete'].run(args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 404, 'NOT FOUND')))
def test_command_delete_404(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'cba'])
self.assertEqual(args.func,
self.branch.commands['delete'].run_and_print)
self.assertRaises(resource.ResourceNotFoundError,
self.branch.commands['delete'].run,
args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'delete',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_delete_failed(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'cba'])
self.assertRaises(Exception, self.branch.commands['delete'].run, args)
class ResourceViewCommandTestCase(unittest2.TestCase):
def setUp(self):
ResourceViewCommand.display_attributes = []
def test_get_include_attributes(self):
cls = namedtuple('Args', 'attr')
args = cls(attr=[])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, [])
args = cls(attr=['result'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, ['result'])
args = cls(attr=['result', 'trigger_instance'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, ['result', 'trigger_instance'])
args = cls(attr=['result.stdout'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, ['result.stdout'])
args = cls(attr=['result.stdout', 'result.stderr'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, ['result.stdout', 'result.stderr'])
args = cls(attr=['result.stdout', 'trigger_instance.id'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, ['result.stdout', 'trigger_instance.id'])
ResourceViewCommand.display_attributes = ['id', 'status']
args = cls(attr=[])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(set(result), set(['id', 'status']))
args = cls(attr=['trigger_instance'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(set(result), set(['trigger_instance']))
args = cls(attr=['all'])
result = ResourceViewCommand._get_include_attributes(args=args)
self.assertEqual(result, None)
class CommandsHelpStringTestCase(BaseCLITestCase):
"""
Test case which verifies that all the commands support -h / --help flag.
"""
capture_output = True
# TODO: Automatically iterate all the available commands
COMMANDS = [
# action
['action', 'list'],
['action', 'get'],
['action', 'create'],
['action', 'update'],
['action', 'delete'],
['action', 'enable'],
['action', 'disable'],
['action', 'execute'],
# execution
['execution', 'cancel'],
['execution', 'pause'],
['execution', 'resume'],
['execution', 'tail']
]
def test_help_command_line_arg_works_for_supported_commands(self):
shell = Shell()
for command in self.COMMANDS:
            # First test longhand notation
argv = command + ['--help']
try:
result = shell.run(argv)
except SystemExit as e:
self.assertEqual(e.code, 0)
else:
self.assertEqual(result, 0)
stdout = self.stdout.getvalue()
self.assertTrue('usage:' in stdout)
self.assertTrue(' '.join(command) in stdout)
# self.assertTrue('positional arguments:' in stdout)
self.assertTrue('optional arguments:' in stdout)
# Reset stdout and stderr after each iteration
self._reset_output_streams()
# Then shorthand notation
argv = command + ['-h']
try:
result = shell.run(argv)
except SystemExit as e:
self.assertEqual(e.code, 0)
else:
self.assertEqual(result, 0)
stdout = self.stdout.getvalue()
self.assertTrue('usage:' in stdout)
self.assertTrue(' '.join(command) in stdout)
# self.assertTrue('positional arguments:' in stdout)
self.assertTrue('optional arguments:' in stdout)
# Verify that the actual help usage string was triggered and not the invalid
# "too few arguments" which would indicate command doesn't actually correctly handle
# --help flag
self.assertTrue('too few arguments' not in stdout)
self._reset_output_streams()
| 39.834239
| 99
| 0.617232
|
45e040cc9ef66ee7c20ad2ce3775a8974be55d4e
| 4,929
|
py
|
Python
|
unit_tests/view_modify_land_charge/test_update_location_confirmation.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | 1
|
2019-10-03T13:58:29.000Z
|
2019-10-03T13:58:29.000Z
|
unit_tests/view_modify_land_charge/test_update_location_confirmation.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | null | null | null |
unit_tests/view_modify_land_charge/test_update_location_confirmation.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | 1
|
2021-04-11T05:24:57.000Z
|
2021-04-11T05:24:57.000Z
|
from maintain_frontend import main
from flask_testing import TestCase
from flask import url_for
from unit_tests.utilities import Utilities
from unittest.mock import patch
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.models import LocalLandChargeItem
from maintain_frontend.constants.permissions import Permissions
class TestUpdateLocationConfirmation(TestCase):
def create_app(self):
Utilities.mock_session_cookie_flask_test(self)
return main.app
def test_get_without_geom(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
state = LocalLandChargeItem()
state.local_land_charge = 9372254
state.geometry = None
self.mock_session.return_value.add_charge_state = state
response = self.client.get(url_for('modify_land_charge.get_update_location_confirmation'))
self.assert_status(response, 302)
self.assertRedirects(response, '/error')
def test_get_without_state(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
self.mock_session.return_value.add_charge_state = None
response = self.client.get(url_for('modify_land_charge.get_update_location_confirmation'))
self.assert_status(response, 302)
self.assertRedirects(response, '/error')
def test_get_with_state(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
state = LocalLandChargeItem()
state.local_land_charge = 9372254
state.geometry = 'abc'
self.mock_session.return_value.add_charge_state = state
response = self.client.get(url_for('modify_land_charge.get_update_location_confirmation'))
self.assert_status(response, 200)
self.assert_template_used('update_location_confirmation.html')
def test_post_without_geom(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
state = LocalLandChargeItem()
state.local_land_charge = 9372254
state.geometry = None
self.mock_session.return_value.add_charge_state = state
response = self.client.post(url_for('modify_land_charge.post_update_location_confirmation'))
self.assert_status(response, 302)
self.assertRedirects(response, '/error')
def test_post_without_state(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
self.mock_session.return_value.add_charge_state = None
response = self.client.post(url_for('modify_land_charge.post_update_location_confirmation'))
self.assert_status(response, 302)
self.assertRedirects(response, '/error')
@patch('maintain_frontend.view_modify_land_charge.update_location_confirmation.LocationConfirmationValidator')
def test_location_post_validation_errors(self, mock_location_validator):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
state = LocalLandChargeItem()
state.geometry = "abc"
state.local_land_charge = 9372254
self.mock_session.return_value.add_charge_state = state
validation_errors = {'map': 'test error message'}
mock_location_validator.validate.return_value.errors = validation_errors
response = self.client.post(url_for('modify_land_charge.post_update_location_confirmation'))
self.assert_status(response, 400)
self.assert_template_used('update_location_confirmation.html')
def test_post_success(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.vary_llc]
self.mock_session.return_value.user.roles = ['LLC LR Admins']
state = LocalLandChargeItem()
state.geometry = "abc"
state.local_land_charge = 399664232600384
self.mock_session.return_value.add_charge_state = state
form_data = {'location-confirmation': True}
response = self.client.post(url_for('modify_land_charge.post_update_location_confirmation'), data=form_data)
self.assert_status(response, 302)
self.assertRedirects(response, url_for('modify_land_charge.modify_land_charge',
local_land_charge='LLC-H3LL0W0RLD'))
| 46.942857
| 116
| 0.742138
|
77ed7dff8ca7b2f7228852e78ce17954a9a33285
| 5,394
|
py
|
Python
|
vkbottle/bot/events/processor.py
|
croogg/vkbottle
|
7355c2ef89d302410c8e05be162ba71e5f040990
|
[
"MIT"
] | null | null | null |
vkbottle/bot/events/processor.py
|
croogg/vkbottle
|
7355c2ef89d302410c8e05be162ba71e5f040990
|
[
"MIT"
] | null | null | null |
vkbottle/bot/events/processor.py
|
croogg/vkbottle
|
7355c2ef89d302410c8e05be162ba71e5f040990
|
[
"MIT"
] | 2
|
2020-05-10T11:48:25.000Z
|
2021-12-02T09:22:54.000Z
|
"""
MIT License
Copyright (c) 2019 Arseniy Timonik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ...vktypes.longpoll import EventTypes
from ...methods import Api
from ..events import Events
from ...utils import Logger, sorted_dict_keys
import time
from ...vktypes import types
from ...project_collections import colored
class UpdatesProcessor(object):
"""
Processor of VK API LongPoll events
"""
on: Events
logger: Logger
api: Api
a: float
async def new_update(self, event: dict):
"""
Process VK Event Object
:param event: VK Server Event object
"""
for update in event['updates']:
obj = update['object']
if update['type'] == EventTypes.MESSAGE_NEW:
if obj['peer_id'] < 2e9:
await self.new_message(obj)
else:
await self.new_chat_message(obj)
else:
# If this is an event of the group
print('receive event')
pass
await self.logger('Timing:', round(time.time() - self.a, 5))
async def new_message(self, obj: dict):
"""
Private message processor. Using regex to process regular expressions in messages
:param obj: VK API Event Object
"""
await self.logger(
colored(
'-> MESSAGE FROM {} TEXT "{}" TIME #'.format(
obj['peer_id'],
obj['text'].replace('\n', ' / ')
),
'red'
)
)
answer = types.Message(**obj, api=[self.api])
found: bool = False
for priority in await sorted_dict_keys(self.on.processor_message_regex):
for key in self.on.processor_message_regex[priority]:
if key.match(answer.text) is not None:
found = True
# [Feature] Async Use
# Added v0.19#master
await self.on.processor_message_regex[priority][key](
answer,
**key.match(answer.text).groupdict()
)
await self.logger(
'New message compiled with decorator <' +
colored(self.on.processor_message_regex[priority][key].__name__, 'magenta') +
'> (from: {})'.format(
obj['peer_id']
)
)
break
if found:
break
if not found:
await self.on.undefined_message_func(answer)
async def new_chat_message(self, obj: dict):
"""
Chat messages processor. Using regex to process regular expressions in messages
:param obj: VK API Event Object
"""
await self.logger(
colored(
'-> MESSAGE FROM CHAT {} TEXT "{}" TIME #'.format(
obj['peer_id'],
obj['text'].replace('\n', ' ')
),
'red'
))
answer = types.Message(**obj, api=[self.api])
found: bool = False
for priority in await sorted_dict_keys(self.on.processor_message_chat_regex):
for key in self.on.processor_message_chat_regex[priority]:
print(key)
if key.match(answer.text) is not None:
found = True
# [Feature] Async Use
# Added v0.19#master
await self.on.processor_message_chat_regex[priority][key](
answer,
**key.match(answer.text).groupdict()
)
await self.logger(
'New message compiled with decorator <\x1b[35m{}\x1b[0m> (from: {})'.format(
self.on.processor_message_chat_regex[priority][key].__name__,
obj['peer_id']
)
)
break
if found:
break
async def new_event(self, event_type: str, obj: dict):
"""
LongPoll Events Processor
:param event_type: VK Server Event Type
:param obj: VK Server Event Object
"""
pass
| 31 | 101 | 0.54505 |
62f991041b56f47897169955eb975c24bc7e5520 | 10,203 | py | Python | tests/functional/dashboard/test_offer.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | ["BSD-3-Clause"] | 4,639 | 2015-01-01T00:42:33.000Z | 2022-03-29T18:32:12.000Z | tests/functional/dashboard/test_offer.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | ["BSD-3-Clause"] | 2,215 | 2015-01-02T22:32:51.000Z | 2022-03-29T12:16:23.000Z | tests/functional/dashboard/test_offer.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | ["BSD-3-Clause"] | 2,187 | 2015-01-02T06:33:31.000Z | 2022-03-31T15:32:36.000Z |
from django.urls import reverse
from django.utils import timezone
from oscar.apps.offer import models
from oscar.test import factories, testcases
class TestAnAdmin(testcases.WebTestCase):
    # New version of the offer tests, but using WebTest
is_staff = True
def setUp(self):
super().setUp()
self.range = models.Range.objects.create(
name="All products", includes_all_products=True)
def test_can_create_an_offer(self):
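        # The dashboard offer wizard is four sequential forms: metadata -> incentive
        # (benefit) -> condition -> restrictions; each submit() redirects to the next step.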
list_page = self.get(reverse('dashboard:offer-list'))
metadata_page = list_page.click('Create new offer')
metadata_form = metadata_page.form
metadata_form['name'] = "Test offer"
metadata_form['offer_type'] = models.ConditionalOffer.SITE
benefit_page = metadata_form.submit().follow()
benefit_form = benefit_page.form
benefit_form['range'] = self.range.id
benefit_form['type'] = "Percentage"
benefit_form['value'] = "25"
condition_page = benefit_form.submit().follow()
condition_form = condition_page.form
condition_form['range'] = self.range.id
condition_form['type'] = "Count"
condition_form['value'] = "3"
restrictions_page = condition_form.submit().follow()
restrictions_page.form.submit()
offers = models.ConditionalOffer.objects.all()
self.assertEqual(1, len(offers))
offer = offers[0]
self.assertEqual("Test offer", offer.name)
self.assertEqual(3, offer.condition.value)
self.assertEqual(25, offer.benefit.value)
def test_offer_list_page(self):
offer = factories.create_offer(name="Offer A")
list_page = self.get(reverse('dashboard:offer-list'))
form = list_page.forms[0]
form['name'] = "I do not exist"
res = form.submit()
self.assertTrue("No offers found" in res.text)
form['name'] = "Offer A"
res = form.submit()
self.assertFalse("No offers found" in res.text)
form['is_active'] = "true"
res = form.submit()
self.assertFalse("No offers found" in res.text)
yesterday = timezone.now() - timezone.timedelta(days=1)
offer.end_datetime = yesterday
offer.save()
form['is_active'] = "true"
res = form.submit()
self.assertTrue("No offers found" in res.text)
tomorrow = timezone.now() + timezone.timedelta(days=1)
offer.end_datetime = tomorrow
offer.save()
form['offer_type'] = "Site"
res = form.submit()
self.assertFalse("No offers found" in res.text)
form['offer_type'] = "Voucher"
res = form.submit()
self.assertTrue("No offers found" in res.text)
def test_can_update_an_existing_offer(self):
factories.create_offer(name="Offer A")
list_page = self.get(reverse('dashboard:offer-list'))
detail_page = list_page.click('Offer A')
metadata_page = detail_page.click(linkid="edit_metadata")
metadata_form = metadata_page.form
metadata_form['name'] = "Offer A+"
metadata_form['offer_type'] = models.ConditionalOffer.SITE
benefit_page = metadata_form.submit().follow()
benefit_form = benefit_page.form
condition_page = benefit_form.submit().follow()
condition_form = condition_page.form
restrictions_page = condition_form.submit().follow()
restrictions_page.form.submit()
models.ConditionalOffer.objects.get(name="Offer A+")
def test_can_update_an_existing_offer_save_directly(self):
# see if we can save the offer directly without completing all
# steps
offer = factories.create_offer(name="Offer A")
name_and_description_page = self.get(
reverse('dashboard:offer-metadata', kwargs={'pk': offer.pk}))
res = name_and_description_page.form.submit('save').follow()
self.assertEqual(200, res.status_code)
def test_can_jump_to_intermediate_step_for_existing_offer(self):
offer = factories.create_offer()
url = reverse('dashboard:offer-condition',
kwargs={'pk': offer.id})
self.assertEqual(200, self.get(url).status_code)
def test_cannot_jump_to_intermediate_step(self):
for url_name in ('dashboard:offer-condition',
'dashboard:offer-benefit',
'dashboard:offer-restrictions'):
response = self.get(reverse(url_name))
self.assertEqual(302, response.status_code)
def test_can_suspend_an_offer(self):
# Create an offer
offer = factories.create_offer()
self.assertFalse(offer.is_suspended)
detail_page = self.get(reverse('dashboard:offer-detail',
kwargs={'pk': offer.pk}))
form = detail_page.forms['status_form']
form.submit('suspend')
offer.refresh_from_db()
self.assertTrue(offer.is_suspended)
def test_can_reinstate_a_suspended_offer(self):
# Create a suspended offer
offer = factories.create_offer()
offer.suspend()
self.assertTrue(offer.is_suspended)
detail_page = self.get(reverse('dashboard:offer-detail',
kwargs={'pk': offer.pk}))
form = detail_page.forms['status_form']
form.submit('unsuspend')
offer.refresh_from_db()
self.assertFalse(offer.is_suspended)
def test_can_change_offer_priority(self):
offer = factories.create_offer()
restrictions_page = self.get(reverse('dashboard:offer-restrictions', kwargs={'pk': offer.pk}))
restrictions_page.form['priority'] = '12'
restrictions_page.form.submit()
offer.refresh_from_db()
self.assertEqual(offer.priority, 12)
def test_jump_back_to_incentive_step_for_new_offer(self):
list_page = self.get(reverse('dashboard:offer-list'))
metadata_page = list_page.click('Create new offer')
metadata_form = metadata_page.form
metadata_form['name'] = "Test offer"
metadata_form['offer_type'] = models.ConditionalOffer.SITE
benefit_page = metadata_form.submit().follow()
benefit_form = benefit_page.form
benefit_form['range'] = self.range.id
benefit_form['type'] = "Percentage"
benefit_form['value'] = "25"
benefit_form.submit()
benefit_page = self.get(reverse('dashboard:offer-benefit'))
# Accessing through context because WebTest form does not include an 'errors' field
benefit_form = benefit_page.context['form']
self.assertFalse('range' in benefit_form.errors)
self.assertEqual(len(benefit_form.errors), 0)
def test_jump_back_to_condition_step_for_new_offer(self):
list_page = self.get(reverse('dashboard:offer-list'))
metadata_page = list_page.click('Create new offer')
metadata_form = metadata_page.form
metadata_form['name'] = "Test offer"
metadata_form['offer_type'] = models.ConditionalOffer.SITE
benefit_page = metadata_form.submit().follow()
benefit_form = benefit_page.form
benefit_form['range'] = self.range.id
benefit_form['type'] = "Percentage"
benefit_form['value'] = "25"
condition_page = benefit_form.submit().follow()
condition_form = condition_page.form
condition_form['range'] = self.range.id
condition_form['type'] = "Count"
condition_form['value'] = "3"
condition_form.submit()
condition_page = self.get(reverse('dashboard:offer-condition'))
self.assertFalse('range' in condition_page.errors)
self.assertEqual(len(condition_page.errors), 0)
def test_jump_to_incentive_step_for_existing_offer(self):
offer = factories.create_offer()
url = reverse('dashboard:offer-benefit', kwargs={'pk': offer.id})
condition_page = self.get(url)
self.assertFalse('range' in condition_page.errors)
self.assertEqual(len(condition_page.errors), 0)
def test_jump_to_condition_step_for_existing_offer(self):
offer = factories.create_offer()
url = reverse('dashboard:offer-condition', kwargs={'pk': offer.id})
condition_page = self.get(url)
self.assertFalse('range' in condition_page.errors)
self.assertEqual(len(condition_page.errors), 0)
class TestOfferListSearch(testcases.WebTestCase):
is_staff = True
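    # Each entry pairs a set of query parameters with the filter-description badges the
    # dashboard is expected to render for that search.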
TEST_CASES = [
({}, []),
(
{'name': 'Bob Smith'},
['Name matches "Bob Smith"']
),
(
{'is_active': True},
['Is active']
),
(
{'is_active': False},
['Is inactive']
),
(
{'offer_type': 'Site'},
['Is of type "Site offer - available to all users"']
),
(
{'has_vouchers': True},
['Has vouchers']
),
(
{'has_vouchers': False},
['Has no vouchers']
),
(
{'voucher_code': 'abcd1234'},
['Voucher code matches "abcd1234"']
),
(
{
'name': 'Bob Smith',
'is_active': True,
'offer_type': 'Site',
'has_vouchers': True,
'voucher_code': 'abcd1234',
},
[
'Name matches "Bob Smith"',
'Is active',
'Is of type "Site offer - available to all users"',
'Has vouchers',
'Voucher code matches "abcd1234"',
]
),
]
def test_search_filter_descriptions(self):
url = reverse('dashboard:offer-list')
for params, expected_filters in self.TEST_CASES:
response = self.get(url, params=params)
self.assertEqual(response.status_code, 200)
applied_filters = [
el.text.strip() for el in
response.html.select('.search-filter-list .badge')
]
self.assertEqual(applied_filters, expected_filters)
| 34.941781 | 102 | 0.613055 |
f02054ac75d196f9d24dcbe1fee5d3c87e604dbd | 72,823 | py | Python | pmagpy_tests/test_imports3.py | schwehr/PmagPy | 5e9edc5dc9a7a243b8e7f237fa156e0cd782076b | ["BSD-3-Clause"] | 2 | 2020-07-05T01:11:33.000Z | 2020-07-05T01:11:39.000Z | pmagpy_tests/test_imports3.py | schwehr/PmagPy | 5e9edc5dc9a7a243b8e7f237fa156e0cd782076b | ["BSD-3-Clause"] | null | null | null | pmagpy_tests/test_imports3.py | schwehr/PmagPy | 5e9edc5dc9a7a243b8e7f237fa156e0cd782076b | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import unittest
import os
#import sys
from pmagpy import pmag
from pmagpy import contribution_builder as cb
from pmagpy import convert_2_magic as convert
WD = pmag.get_test_WD()
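# WD is the PmagPy test working directory; each TestCase chdirs into it and removes the
# MagIC files generated by the converters in its tearDown().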
class Test2g_bin_magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
#input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
#'IODP_jr6_magic')
#files = ['test.magic', 'other_er_samples.txt']
files = ['mn001-1a.magic', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
pmag.remove_files(['custom_specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt'], 'data_files')
pmag.remove_files(files, os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1'))
os.chdir(WD)
def test_2g_with_no_files(self):
options = {}
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'mag file is required input')
def test_2g_with_files(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
self.assertTrue(os.path.isfile(outfile))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
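    # Sample naming conventions "4" and "7" must carry a "-Z" suffix (e.g. "4-3", "7-3");
    # the converter rejects the bare form, as the next tests assert.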
def test_2g_fail_option4(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '4'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'option [4] must be in form 4-Z where Z is an integer')
def test_2g_succeed_option4(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '4-3'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
def test_2g_fail_option7(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '7'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'option [7] must be in form 7-Z where Z is an integer')
def test_2g_succeed_option7(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '7-3'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'measurements.txt')
def test_2g_fail_option6(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['samp_con'] = '6'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, 'Naming convention option [6] not currently supported')
def test_2g_with_bad_file(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1ax.dat'
program_ran, error_message = convert._2g_bin(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "bad mag file")
def test_2g_with_options(self):
options = {}
options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
options['mag_file'] = 'mn001-1a.dat'
options['meas_file'] = 'mn001-1a.magic'
options['samp_con'] = '4-3'
options['inst'] = 'instrument'
options['noave'] = 0
options['specnum'] = 2
options['location'] = 'location'
options['or_con'] = '4'
options['gmeths'] = 'FS-LOC-MAP:SO-POM'
program_ran, outfile = convert._2g_bin(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.split(outfile)[1], 'mn001-1a.magic')
def test_2g_with_path(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'2g_bin_magic', 'mn1')
#options['input_dir'] = os.path.join(WD, 'data_files', 'convert_2_magic',
# '2g_bin_magic', 'mn1')
options['mag_file'] = os.path.join(input_dir, 'mn001-1a.dat')
options['meas_file'] = os.path.join(input_dir, 'mn001-1a.magic')
options['spec_file'] = os.path.join('data_files', 'custom_specimens.txt')
options['dir_path'] = 'data_files'
program_ran, outfile = convert._2g_bin(**options)
self.assertEqual(outfile, options['meas_file'])
self.assertTrue(os.path.exists(options['meas_file']))
self.assertTrue(os.path.exists(os.path.join('data_files', 'sites.txt')))
class TestAgmMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'agm_magic_example.magic', 'agm_magic_example_locations.txt',
'agm_magic_example_specimens.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'agm_magic')
program_ran, filename = convert.agm('agm_magic_example.agm',
meas_outfile='agm_magic_example.magic',
input_dir_path=input_dir, fmt="old")
self.assertTrue(program_ran)
def test_backfield_success(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'agm_magic')
program_ran, filename = convert.agm('agm_magic_example.irm',
meas_outfile='agm_magic_example.magic',
input_dir_path=input_dir, fmt="old", bak=True,
instrument="SIO-FLO")
class TestBgcMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'bgc_magic')
def tearDown(self):
filelist = ['96MT.05.01.magic', 'BC0-3A.magic',
'measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt']
pmag.remove_files(filelist, self.input_dir)
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_bgc_with_no_files(self):
with self.assertRaises(TypeError):
convert.bgc()
def test_bgc_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': '96MT.05.01'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_bgc_with_path(self):
options = {}
options['mag_file'] = os.path.join(self.input_dir, '96MT.05.01')
options['spec_file'] = os.path.join(WD, 'custom_specimens.txt')
options['dir_path'] = 'data_files'
program_ran, outfile = convert.bgc(**options)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'measurements.txt'))
self.assertTrue(os.path.isfile(options['spec_file']))
self.assertTrue(os.path.isfile(os.path.join(WD, 'data_files', 'samples.txt')))
def test_bgc_alternate_infile(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'BC0-3A'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'measurements.txt'))
def test_bgc_with_append(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'BC0-3A'}
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
options['append'] = True
program_ran, outfile = convert.bgc(**options)
self.assertTrue(program_ran)
lines, file_type = pmag.magic_read(os.path.join(WD, 'specimens.txt'))
self.assertEqual(len(lines), 2)
class TestCitMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
#loc_file = 'custom_locations.txt'
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'custom_locations.txt']
dir_path = os.path.join(WD, 'data_files')
pmag.remove_files(filelist, dir_path)
samp_file = 'custom_samples.txt'
dir_path = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
pmag.remove_files([samp_file], dir_path)
os.chdir(WD)
def test_cit_with_no_files(self):
program_ran, error_message = convert.cit()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'bad sam file name')
def test_cit_magic_with_file(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_cit_magic_with_path(self):
options = {}
#options['input_dir_path'] = os.path.join(WD, 'data_files',
# 'convert_2_magic',
        # 'cit_magic', 'PI47')
options['magfile'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47', 'PI47-.sam')
options['loc_file'] = 'custom_locations.txt'
options['samp_file'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47', 'custom_samples.txt')
options['dir_path'] = os.path.join(WD, 'data_files')
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
for fname in [os.path.join(WD, 'data_files', options['loc_file']),
options['samp_file'],
os.path.join(WD, 'data_files', 'specimens.txt')]:
self.assertTrue(os.path.isfile(fname))
def test_cit_magic_fail_option4(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '4'
program_ran, error_message = convert.cit(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "naming convention option [4] must be in form 4-Z where Z is an integer")
def test_cit_magic_succeed_option4(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '4-3'
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
def test_cit_magic_with_options(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '2'
options['methods'] = ['SO-SM:SO-MAG']
options['locname'] = 'location'
options['noave'] = 1
options['specnum'] = 2
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
def test_cit_magic_with_other_data(self):
options = {}
options['input_dir_path'] = os.path.join(WD, 'data_files',
'convert_2_magic',
'cit_magic', 'PI47')
options['magfile'] = 'PI47-.sam'
options['samp_con'] = '1'
options['methods'] = ['SO-SM:SO-MAG']
options['locname'] = 'location'
options['noave'] = 1
options['specnum'] = 2
program_ran, outfile = convert.cit(**options)
self.assertTrue(program_ran)
expected_file = os.path.join('measurements.txt')
self.assertEqual(outfile, expected_file)
class TestGenericMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['generic_magic_example.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'generic_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_generic_magic_no_exp(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'generic_magic')
options = {}
options['magfile'] = os.path.join(dir_path, 'generic_magic_example.txt')
options['meas_file'] = os.path.join(dir_path, 'generic_magic_example.magic')
program_ran, error_message = convert.generic(**options)
self.assertFalse(program_ran)
no_exp_error = "Must provide experiment. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see help for format), NLT"
self.assertEqual(no_exp_error, error_message)
def test_generic_magic_success(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'generic_magic')
options = {}
options['magfile'] = os.path.join(dir_path, 'generic_magic_example.txt')
options['meas_file'] = os.path.join(dir_path, 'generic_magic_example.magic')
options['experiment'] = 'Demag'
program_ran, outfile_name = convert.generic(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile_name), os.path.realpath(options['meas_file']))
class TestHujiMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['Massada_AF_HUJI_new_format.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'huji_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'Massada_AF_HUJI_new_format.magic']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_with_bad_file(self):
program_ran, error_msg = convert.huji()
self.assertFalse(program_ran)
self.assertEqual(error_msg, "mag_file field is a required option")
program_ran, error_msg = convert.huji("fake")
self.assertFalse(program_ran)
self.assertEqual(error_msg, "bad mag file name")
def test_huji_magic_success(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'huji_magic')
full_file = os.path.join(dir_path, "Massada_AF_HUJI_new_format.txt")
options = {}
options['input_dir_path'] = dir_path
options['magfile'] = "Massada_AF_HUJI_new_format.txt"
options['meas_file'] = "Massada_AF_HUJI_new_format.magic"
options['codelist'] = 'AF'
program_ran, outfile = convert.huji(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, options['meas_file'])
def test_with_options(self):
dir_path = os.path.join('data_files', 'convert_2_magic',
'huji_magic')
options = {}
options['dir_path'] = dir_path
options['magfile'] = "Massada_AF_HUJI_new_format.txt"
options['meas_file'] = "Massada_AF_HUJI_new_format.magic"
options['codelist'] = "AF"
options['location'] = "Massada"
options['noave'] = True
options['user'] = "me"
options['labfield'] = 40
options['phi'] = 0
options['theta'] = 90
program_ran, outfile = convert.huji(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, options['meas_file'])
def test_with_no_exp_type(self):
dir_path = os.path.join('data_files', 'convert_2_magic', 'huji_magic')
mag_file = "Massada_AF_HUJI_new_format.txt"
res, error = convert.huji(mag_file, dir_path)
self.assertFalse(res)
self.assertEqual(error, "Must select experiment type (codelist/-LP, options are: [AF, T, ANI, TRM, CR])")
class TestHujiSampleMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['samples.txt', 'sites.txt']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'huji_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt',
'Massada_AF_HUJI_new_format.magic']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_success(self):
res, outfile = convert.huji_sample("magdelkrum_datafile.txt",
dir_path=os.path.join(WD, 'data_files', 'convert_2_magic', 'huji_magic'))
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'convert_2_magic', 'huji_magic', 'samples.txt'))
class TestIodpSrmMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt',
'IODP_LIMS_SRMsection_366_U1494.csv.magic',
'IODP_LIMS_SRMsection_366_U1494_locations.txt',
'IODP_LIMS_SRMsection_366_U1494_samples.txt',
'IODP_LIMS_SRMsection_366_U1494_sites.txt',
'IODP_LIMS_SRMsection_366_U1494_specimens.txt']
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
#directory = os.path.join(WD)
pmag.remove_files(filelist, dir_path)
dir_path = os.path.join(WD, 'data_files', 'convert_2_magic', 'iodp_srm_magic')
pmag.remove_files(filelist, dir_path)
dir_path = WD
pmag.remove_files(filelist, dir_path)
os.chdir(WD)
def test_iodp_with_no_files(self):
program_ran, error_message = convert.iodp_srm()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'No .csv files were found')
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_files(self):
options = {}
dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_srm_magic')
options['dir_path'] = dir_path
files = os.listdir(dir_path)
files = ['IODP_Janus_312_U1256.csv', 'SRM_318_U1359_B_A.csv' ] # this one takes way too long: IODP_LIMS_SRMsection_344_1414A.csv
info = []
for f in files:
if f.endswith('csv') and 'summary' not in f and 'discrete' not in f and 'sample' not in f:
options['csv_file'] = f
program_ran, outfile = convert.iodp_srm(**options)
meas_df = cb.MagicDataFrame(pmag.resolve_file_name(outfile, dir_path))
self.assertTrue(len(meas_df.df) > 0)
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_one_file(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
# 'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
options['dir_path'] = dir_path
options['input_dir_path'] = dir_path
options['csv_file'] = 'srmsection-XXX-UTEST-A.csv'
program_ran, outfile = convert.iodp_srm(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join('measurements.txt'))
meas_df = cb.MagicDataFrame(os.path.join(dir_path, outfile))
self.assertIn('sequence', meas_df.df.columns)
def test_iodp_with_one_file_with_path(self):
options = {}
dir_path = os.path.join('data_files', 'UTESTA', 'SRM_data')
#options['dir_path'] = dir_path
options['dir_path'] = WD #dir_path
options['input_dir_path'] = "fake/path"
options['csv_file'] = os.path.join(dir_path, 'srmsection-XXX-UTEST-A.csv')
program_ran, outfile = convert.iodp_srm(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join('measurements.txt'))
@unittest.skip('broken')
class TestIodpDscrMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt', 'custom_samples.txt']
#directory = os.path.join(WD)
pmag.remove_files(filelist, WD)
pmag.remove_files(['custom_measurements.txt'], os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_iodp_with_no_files(self):
program_ran, error_message = convert.iodp_dscr()
self.assertFalse(program_ran)
self.assertEqual(error_message, 'No .csv files were found')
#@unittest.skip("iodp_srm_magic is missing an example datafile")
def test_iodp_with_one_file(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
#'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
options['input_dir_path'] = dir_path
options['csv_file'] = 'srmdiscrete-XXX-UTEST-A.csv'
program_ran, outfile = convert.iodp_dscr(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, 'measurements.txt')
def test_iodp_with_path(self):
options = {}
#dir_path = os.path.join(WD, 'data_files', 'convert_2_magic',
#'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
#options['input_dir_path'] = dir_path
options['csv_file'] = os.path.join('data_files', 'UTESTA', 'SRM_data', 'srmdiscrete-XXX-UTEST-A.csv')
options['meas_file'] = os.path.join(WD, 'data_files', 'custom_measurements.txt')
options['samp_file'] = 'custom_samples.txt'
program_ran, outfile = convert.iodp_dscr(**options)
self.assertEqual(program_ran, True)
self.assertEqual(outfile, os.path.join(WD, 'data_files', 'custom_measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
class TestIodpJr6Magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'measurements.txt', 'specimens.txt']
pmag.remove_files(files, WD)
# then, make sure that hidden_er_samples.txt has been successfully renamed to er_samples.txt
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
hidden_sampfile = os.path.join(input_dir, 'hidden_er_samples.txt')
sampfile = os.path.join(input_dir, 'er_samples.txt')
if os.path.exists(hidden_sampfile):
os.rename(hidden_sampfile, sampfile)
pmag.remove_files(['custom_specimens.txt'], 'data_files')
os.chdir(WD)
def test_iodp_jr6_with_no_files(self):
with self.assertRaises(TypeError):
convert.iodp_jr6()
def test_iodp_jr6_with_invalid_mag_file(self):
options = {'mag_file': 'fake'}
program_ran, error_message = convert.iodp_jr6(**options)
expected_msg = 'The input file you provided: {} does not exist.\nMake sure you have specified the correct filename AND correct input directory name.'.format(os.path.realpath(os.path.join('.', 'fake')))
self.assertFalse(program_ran)
self.assertEqual(error_message, expected_msg)
#@unittest.skipIf('win32' in sys.platform or 'win62' in sys.platform, "Requires up to date version of pandas")
def test_iodp_jr6_with_magfile(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
options['input_dir_path'] = input_dir
mag_file = 'test.jr6'
options['mag_file'] = 'test.jr6'
meas_file = 'test.magic'
options['meas_file'] = meas_file
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_iodp_jr6_with_path(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
#options['input_dir_path'] = input_dir
mag_file = os.path.join('data_files', 'convert_2_magic', 'iodp_jr6_magic', 'test.jr6')
options['mag_file'] = mag_file #'test.jr6'
options['spec_file'] = os.path.join('data_files', 'custom_specimens.txt')
options['loc_file'] = 'custom_locations.txt'
meas_file = 'test.magic'
options['meas_file'] = meas_file
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
for fname in [options['loc_file'], options['spec_file']]:
self.assertTrue(os.path.isfile(fname))
#@unittest.skipIf('win32' in sys.platform or 'win62' in sys.platform, "Requires up to date version of pandas")
def test_iodp_jr6_with_options(self):
options = {}
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_jr6_magic')
options['input_dir_path'] = input_dir
mag_file = 'test.jr6'
options['mag_file'] = 'test.jr6'
meas_file = 'test.magic'
options['meas_file'] = meas_file
options['noave'] = 1
options['lat'] = 3
options['lon'] = 5
options['volume'] = 3
program_ran, outfile = convert.iodp_jr6(**options)
self.assertTrue(program_ran)
self.assertEqual(outfile, meas_file)
class TestIodpSamplesMagic(unittest.TestCase):
def setUp(self):
self.input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'iodp_srm_magic')
def tearDown(self):
os.chdir(WD)
filelist = ['er_samples.txt']
pmag.remove_files(filelist, WD)
def test_with_wrong_format(self):
infile = os.path.join(self.input_dir, 'GCR_U1359_B_coresummary.csv')
program_ran, error_message = convert.iodp_samples(infile)
self.assertFalse(program_ran)
expected_error = 'Could not extract the necessary data from your input file.\nPlease make sure you are providing a correctly formated IODP samples csv file.'
self.assertEqual(error_message, expected_error)
def test_with_right_format(self):
reference_file = os.path.join(WD, 'testing', 'odp_magic',
'odp_magic_er_samples.txt')
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=2)
self.assertTrue(program_ran)
expected_file = os.path.realpath(os.path.join('.', 'er_samples.txt'))
self.assertEqual(os.path.realpath(outfile), expected_file)
self.assertTrue(os.path.isfile(outfile))
def test_content_with_right_format(self):
reference_file = os.path.join(WD, 'data_files', 'testing',
'odp_magic', 'odp_magic_er_samples.txt')
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=2)
with open(reference_file) as ref_file:
ref_lines = ref_file.readlines()
with open(outfile) as out_file:
out_lines = out_file.readlines()
self.assertTrue(program_ran)
self.assertEqual(ref_lines, out_lines)
def test_with_data_model3(self):
infile = os.path.join(self.input_dir, 'samples_318_U1359_B.csv')
program_ran, outfile = convert.iodp_samples(infile, data_model_num=3)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath('samples.txt'), os.path.realpath(outfile))
class TestIodpSamplesCsv(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
def tearDown(self):
files = ['lims_specimens.txt', 'lims_samples.txt',
'lims_sites.txt', 'locations.txt']
pmag.remove_files(files, WD)
def test_success(self):
comp_depth_key='Top depth CSF-B (m)'
samp_file = "samples_17_5_2019.csv"
# do the heavy lifting:
res, outfile = convert.iodp_samples_csv(samp_file, input_dir_path=self.dir_path,
spec_file='lims_specimens.txt',
samp_file='lims_samples.txt', site_file='lims_sites.txt',
dir_path=".", comp_depth_key=comp_depth_key,
lat=self.hole_lat, lon=self.hole_lon)
self.assertTrue(res)
for fname in ['lims_specimens.txt', 'lims_samples.txt', 'lims_sites.txt', 'locations.txt']:
self.assertTrue(os.path.exists(fname))
class TestIodpSrmLore(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
def tearDown(self):
files = ['srm_arch_specimens.txt', 'srm_arch_samples.txt',
'srm_arch_sites.txt', 'srm_arch_measurements.txt']
pmag.remove_files(files, WD)
def test_success(self):
comp_depth_key = 'Depth CSF-B (m)'
srm_archive_file = "srmsection_17_5_2019.csv"
srm_archive_dir = os.path.join(self.dir_path, 'SRM_archive_data')
res, outfile = convert.iodp_srm_lore(srm_archive_file, meas_file='srm_arch_measurements.txt',
comp_depth_key=comp_depth_key, dir_path=".",
input_dir_path=srm_archive_dir,
lat=self.hole_lat, lon=self.hole_lon)
files = ['srm_arch_specimens.txt', 'srm_arch_samples.txt',
'srm_arch_sites.txt', 'srm_arch_measurements.txt']
for fname in files:
self.assertTrue(os.path.exists(fname))
self.assertTrue(res)
class TestIodpDscrLore(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
# make specimen file needed for conversion
comp_depth_key='Top depth CSF-B (m)'
samp_file = "samples_17_5_2019.csv"
# do the heavy lifting:
res, outfile = convert.iodp_samples_csv(samp_file, input_dir_path=self.dir_path,
spec_file='lims_specimens.txt',
samp_file='lims_samples.txt', site_file='lims_sites.txt',
dir_path=".", comp_depth_key=comp_depth_key,
lat=self.hole_lat, lon=self.hole_lon)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
def tearDown(self):
files = ['srm_arch_specimens.txt', 'srm_arch_samples.txt',
'srm_arch_sites.txt', 'srm_arch_measurements.txt',
'srm_dscr_measurements.txt']
pmag.remove_files(files, WD)
def test_success(self):
srm_discrete_file = "srmdiscrete_17_5_2019.csv"
srm_discrete_dir = os.path.join(self.dir_path, 'SRM_discrete_data')
res, outfile = convert.iodp_dscr_lore(srm_discrete_file, meas_file='srm_dscr_measurements.txt',
dir_path=".",input_dir_path=srm_discrete_dir,
spec_file='lims_specimens.txt')
self.assertTrue(res)
self.assertTrue(os.path.exists(outfile))
class TestIodpJr6Lore(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
# generate specimens/samples files needed for conversion
comp_depth_key='Top depth CSF-B (m)'
samp_file = "samples_17_5_2019.csv"
res, outfile = convert.iodp_samples_csv(samp_file, input_dir_path=self.dir_path,
spec_file='lims_specimens.txt',
samp_file='lims_samples.txt', site_file='lims_sites.txt',
dir_path=".", comp_depth_key=comp_depth_key,
lat=self.hole_lat, lon=self.hole_lon)
def tearDown(self):
files = ['lims_specimens.txt', 'lims_samples.txt',
'lims_sites.txt', 'locations.txt', 'jr6_measurements.txt']
pmag.remove_files(files, WD)
def test_success(self):
jr6_dir = os.path.join(self.dir_path, 'JR6_data')
jr6_file = "spinner_17_5_2019.csv"
res, outfile = convert.iodp_jr6_lore(jr6_file,meas_file='jr6_measurements.txt',dir_path=".",
input_dir_path=jr6_dir, spec_file='lims_specimens.txt',
noave=False)
self.assertTrue(res)
self.assertTrue(os.path.exists(outfile))
class TestIodpKly4sLore(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.hole_lat = -56.557775
self.hole_lon = -42.64212833333333
self.dir_path = "data_files/iodp_magic/U999A"
# generate specimens/samples files needed for conversion
comp_depth_key='Top depth CSF-B (m)'
samp_file = "samples_17_5_2019.csv"
res, outfile = convert.iodp_samples_csv(samp_file, input_dir_path=self.dir_path,
spec_file='lims_specimens.txt',
samp_file='lims_samples.txt', site_file='lims_sites.txt',
dir_path=".", comp_depth_key=comp_depth_key,
lat=self.hole_lat, lon=self.hole_lon)
def tearDown(self):
files = ['lims_specimens.txt', 'lims_samples.txt',
'lims_sites.txt', 'locations.txt',
'kly4s_specimens.txt', 'kly4s_measurements.txt']
pmag.remove_files(files, WD)
def test_success(self):
kly4s_dir = os.path.join(self.dir_path, 'KLY4S_data')
kly4s_file = "ex-kappa_17_5_2019.csv"
res, outfile = convert.iodp_kly4s_lore(kly4s_file, meas_out='kly4s_measurements.txt',
spec_infile='lims_specimens.txt',
spec_out='kly4s_specimens.txt',
dir_path=".", input_dir_path=kly4s_dir,
actual_volume=7)
self.assertTrue(res)
self.assertTrue(os.path.exists(outfile))
self.assertTrue(os.path.exists('kly4s_specimens.txt'))
class TestJr6TxtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
output = convert.jr6_txt(**{'mag_file': 'AP12.txt', 'input_dir_path': input_dir})
self.assertTrue(output[0])
self.assertEqual(output[1], 'measurements.txt')
def test_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
options = {'mag_file': 'AP12.txt', 'input_dir_path': input_dir}
options['meas_file'] = "test.magic"
options['lat'] = 1
options['lon'] = 2
options['noave'] = True
output = convert.jr6_txt(**options)
self.assertTrue(output[0])
self.assertEqual(output[1], 'test.magic')
site_df = cb.MagicDataFrame(os.path.join(WD, 'sites.txt'))
self.assertEqual(1, site_df.df.lat.values[0])
class TestJr6Jr6Magic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
files = ['test.magic', 'other_er_samples.txt',
'custom_locations.txt', 'samples.txt', 'sites.txt',
'measurements.txt', 'locations.txt', 'specimens.txt']
pmag.remove_files(files, WD)
def test_success(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
output = convert.jr6_jr6(**{'mag_file': 'AF.jr6', 'input_dir_path': input_dir})
self.assertTrue(output[0])
self.assertEqual(os.path.realpath(output[1]), os.path.realpath('measurements.txt'))
def test_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'jr6_magic')
options = {'mag_file': 'SML07.JR6', 'input_dir_path': input_dir}
options['meas_file'] = "test.magic"
options['lat'] = 1
options['lon'] = 2
options['noave'] = True
output = convert.jr6_jr6(**options)
self.assertTrue(output[0])
self.assertEqual(os.path.realpath(output[1]), os.path.realpath('test.magic'))
site_df = cb.MagicDataFrame(os.path.join(WD, 'sites.txt'))
self.assertEqual(1, site_df.df.lat.values[0])
class TestKly4sMagic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist= ['magic_measurements.txt', 'my_magic_measurements.txt', 'er_specimens.txt', 'er_samples.txt', 'er_sites.txt', 'rmag_anisotropy.txt', 'my_rmag_anisotropy.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_kly4s_without_infile(self):
with self.assertRaises(TypeError):
convert.kly4s()
def test_kly4s_with_invalid_infile(self):
program_ran, error_message = convert.kly4s('hello.txt')
expected_file = os.path.realpath(os.path.join('.', 'hello.txt'))
self.assertFalse(program_ran)
self.assertEqual(error_message, 'Error opening file: {}'.format(expected_file))
def test_kly4s_with_valid_infile(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', dir_path=WD,
input_dir_path=in_dir, data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'magic_measurements.txt'))
def test_kly4s_fail_option4(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, error_message = convert.kly4s('KLY4S_magic_example.dat', samp_con="4",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_kly4s_succeed_option4(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', samp_con="4-2",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'magic_measurements.txt'))
self.assertTrue(os.path.isfile(os.path.join(WD, 'magic_measurements.txt')))
def test_kly4s_with_options(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', specnum=1,
locname="location", inst="instrument",
samp_con=3, or_con=2,
measfile='my_magic_measurements.txt',
aniso_outfile="my_rmag_anisotropy.txt",
dir_path=WD, input_dir_path=in_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.join(WD, 'my_magic_measurements.txt'))
self.assertTrue(os.path.isfile(os.path.join(WD, 'my_rmag_anisotropy.txt')))
def test_kly4s_with_valid_infile_data_model3(self):
in_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'kly4s_magic')
program_ran, outfile = convert.kly4s('KLY4S_magic_example.dat', dir_path=WD,
input_dir_path=in_dir, data_model_num=3)
con = cb.Contribution(WD)
self.assertEqual(['measurements', 'samples', 'sites', 'specimens'], sorted(con.tables))
class TestK15Magic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist = ['magic_measurements.txt', 'my_magic_measurements.txt',
'er_specimens.txt', 'er_samples.txt', 'my_er_samples.txt',
'er_sites.txt', 'rmag_anisotropy.txt',
'my_rmag_anisotropy.txt', 'rmag_results.txt',
'my_rmag_results.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_k15_with_files(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat',
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath('magic_measurements.txt'))
def test_k15_fail_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, error_message = convert.k15('k15_example.dat',
sample_naming_con="4",
input_dir_path=input_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_k15_succeed_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', sample_naming_con="4-2",
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath("magic_measurements.txt"))
def test_k15_with_options(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', specnum=2,
sample_naming_con="3",
location="Here",
meas_file="my_magic_measurements.txt",
samp_file="my_er_samples.txt",
aniso_outfile="my_rmag_anisotropy.txt",
result_file="my_rmag_results.txt",
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath("my_magic_measurements.txt"))
def test_data_model3(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'k15_magic')
program_ran, outfile = convert.k15('k15_example.dat', specnum=2,
input_dir_path=input_dir)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath('./measurements.txt'), os.path.realpath(outfile))
class TestLdeoMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'ldeo_magic')
def tearDown(self):
#filelist = ['measurements.txt', 'specimens.txt',
# 'samples.txt', 'sites.txt']
#pmag.remove_files(filelist, self.input_dir)
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt',
'custom_measurements.txt']
pmag.remove_files(filelist, WD)
#pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_ldeo_with_no_files(self):
with self.assertRaises(TypeError):
convert.ldeo()
def test_ldeo_success(self):
options = {'input_dir_path': self.input_dir, 'magfile': 'ldeo_magic_example.dat'}
program_ran, outfile = convert.ldeo(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_ldeo_options(self):
options = {'input_dir_path': self.input_dir, 'magfile': 'ldeo_magic_example.dat'}
options['noave'] = 1
options['specnum'] = 2
options['samp_con'] = 2
options['meas_file'] = "custom_measurements.txt"
options['location'] = "new place"
options['labfield'], options['phi'], options['theta'] = 40, 0, 90
program_ran, outfile = convert.ldeo(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(options['meas_file']), os.path.realpath(outfile))
class TestLivdbMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'livdb_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
#pmag.remove_files(filelist, '.')
#pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_livdb_success(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"))
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("measurements.txt"))
def test_livdb_all_experiment_types(self):
for folder in ["TH_IZZI+", "MW_C+", "MW_IZZI+andC++", "MW_OT+", "MW_P"]:
res, meas_file = convert.livdb(os.path.join(self.input_dir, folder))
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("measurements.txt"))
def test_with_options(self):
# naming con 1
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=1, meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc["ATPIPV04-1A"]['sample'])
# naming con 2 without chars
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=2, site_name_con=2,
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc['ATPIPV04-1A']['sample'])
df = cb.MagicDataFrame(os.path.join(WD, "samples.txt"))
self.assertEqual("ATPIPV04-1A", df.df.loc['ATPIPV04-1A']['site'])
def test_naming_con_2(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=2, samp_num_chars=1,
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual("ATPIPV04-1", df.df.loc["ATPIPV04-1A"]['sample'])
def test_naming_con_3(self):
res, meas_file = convert.livdb(os.path.join(self.input_dir, "TH_IZZI+"),
location_name="place", samp_name_con=3, samp_num_chars="-",
meas_out="custom.txt")
self.assertTrue(res)
self.assertEqual(meas_file, os.path.realpath("custom.txt"))
df = cb.MagicDataFrame(os.path.join(WD, "specimens.txt"))
self.assertEqual(df.df.loc['ATPIPV04-1A']['sample'], 'ATPIPV04')
df = cb.MagicDataFrame(os.path.join(WD, "samples.txt"))
self.assertEqual(df.df.loc['ATPIPV04']['site'], "ATPIPV04")
class TestMstMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'mst_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_mst_with_no_files(self):
with self.assertRaises(TypeError):
convert.mst()
def test_mst_success(self):
options = {'input_dir_path': self.input_dir, 'infile': 'curie_example.dat'}
options['spec_name'] = 'abcde'
options['location'] = 'place'
program_ran, outfile = convert.mst(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
self.assertEqual(meas_df.df.location.values[0], 'place')
con = cb.Contribution(WD)
for table in ['measurements', 'specimens', 'samples', 'sites', 'locations']:
self.assertIn(table, con.tables)
def test_mst_synthetic(self):
options = {'input_dir_path': self.input_dir, 'infile': 'curie_example.dat'}
options['spec_name'] = 'abcde'
options['syn'] = True
program_ran, outfile = convert.mst(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
class TestMiniMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'mini_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
def test_bad_file(self):
program_ran, error = convert.mini('fake_file')
self.assertFalse(program_ran)
self.assertEqual(error, "bad mag file name")
def test_success(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile)
self.assertTrue(program_ran)
self.assertEqual(outfile, "measurements.txt")
def test_options(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile, meas_file="custom.out",
user="me", noave=1, volume=15,
methcode="LP:FAKE")
self.assertTrue(program_ran)
self.assertEqual(outfile, "custom.out")
def test_dm_2(self):
magfile = os.path.join(self.input_dir, "Peru_rev1.txt")
program_ran, outfile = convert.mini(magfile, meas_file="custom.out",
user="me", noave=1, volume=15,
methcode="LP:FAKE", data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, "custom.out")
class TestPmdMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'pmd_magic', 'PMD', )
def tearDown(self):
filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
'locations.txt', 'custom_specimens.txt', 'measurements.txt',
'custom_meas.txt']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, ".")
os.chdir(WD)
def test_pmd_with_no_files(self):
with self.assertRaises(TypeError):
convert.pmd()
def test_pmd_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'ss0207a.pmd'}
program_ran, outfile = convert.pmd(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
def test_pmd_options(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'ss0207a.pmd'}
options['lat'], options['lon'] = 5, 10
options['specnum'] = 2
options['location'] = 'place'
options['meas_file'] = 'custom_meas.txt'
program_ran, outfile = convert.pmd(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'custom_meas.txt'))
loc_df = cb.MagicDataFrame(os.path.join(WD, 'locations.txt'))
self.assertEqual(loc_df.df.index.values[0], 'place')
class TestSioMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
def tearDown(self):
filelist = ['sio_af_example.magic']
directory = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic')
pmag.remove_files(filelist, directory)
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_sio_magic_no_files(self):
with self.assertRaises(TypeError):
convert.sio()
def test_sio_magic_success(self):
options = {}
dir_path = os.path.join('data_files', 'convert_2_magic',
'sio_magic')
options['mag_file'] = os.path.join(dir_path, 'sio_af_example.dat')
options['meas_file'] = os.path.join(dir_path, 'sio_af_example.magic')
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(file_name),
os.path.realpath(options['meas_file']))
meas_df = cb.MagicDataFrame(os.path.realpath(options['meas_file']))
self.assertIn('sequence', meas_df.df.columns)
self.assertEqual(0, meas_df.df.iloc[0]['sequence'])
def test_sio_magic_success_with_wd(self):
options = {}
dir_path = os.path.join('data_files', 'convert_2_magic',
'sio_magic')
options['mag_file'] = os.path.join('sio_af_example.dat')
options['meas_file'] = os.path.join('sio_af_example.magic')
options['dir_path'] = dir_path
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(file_name),
os.path.realpath(os.path.join(dir_path, options['meas_file'])))
def test_sio_magic_fail_option4(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['samp_con'] = '4'
program_ran, error_message = convert.sio(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, "naming convention option [4] must be in form 4-Z where Z is an integer")
def test_sio_magic_succeed_option4(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['samp_con'] = '4-2'
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(file_name, meas_file)
def test_sio_magic_fail_with_coil(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['coil'] = 4
program_ran, error_message = convert.sio(**options)
self.assertFalse(program_ran)
self.assertEqual(error_message, '4 is not a valid coil specification')
def test_sio_magic_succeed_with_coil(self):
options = {}
options['mag_file'] = os.path.join(WD, 'data_files',
'convert_2_magic', 'sio_magic',
'sio_af_example.dat')
meas_file = os.path.join(WD, 'data_files', 'convert_2_magic',
'sio_magic', 'sio_af_example.magic')
options['meas_file'] = meas_file
options['coil'] = '1'
program_ran, file_name = convert.sio(**options)
self.assertTrue(program_ran)
self.assertEqual(file_name, meas_file)
class TestSMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files', 'convert_2_magic', 's_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, self.input_dir)
def test_with_invalid_file(self):
res, error_msg = convert.s_magic('fake.txt')
self.assertFalse(res)
expected_file = os.path.join(WD, "fake.txt")
self.assertEqual(error_msg, "No such file: {}".format(expected_file))
def test_success(self):
res, outfile = convert.s_magic("s_magic_example.dat", dir_path=self.input_dir)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "specimens.txt"))
def test_with_options(self):
res, outfile = convert.s_magic("s_magic_example.dat", dir_path=self.input_dir,
specnum=1, location="place", spec="abcd-efg",
user="me", samp_con=2)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "specimens.txt"))
self.assertTrue(os.path.exists(os.path.join(self.input_dir, "sites.txt")))
con = cb.Contribution(self.input_dir)
self.assertIn('sites', con.tables)
self.assertEqual('place', con.tables['sites'].df.loc[:, 'location'].values[0])
class TestSufarAscMagic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
filelist = ['magic_measurements.txt', 'my_magic_measurements.txt',
'er_specimens.txt', 'er_samples.txt', 'my_er_samples.txt',
'er_sites.txt', 'rmag_anisotropy.txt', 'my_rmag_anisotropy.txt',
'rmag_results.txt', 'my_rmag_results.txt', 'measurements.txt',
'specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
pmag.remove_files(filelist, WD)
os.chdir(WD)
def test_sufar4_with_no_files(self):
with self.assertRaises(TypeError):
convert.sufar4()
def test_sufar4_with_invalid_file(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'fake_sufar4-asc_magic_example.txt'
program_ran, error_message = convert.sufar4(infile,
input_dir_path=input_dir,
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message,
'Error opening file: {}'.format(os.path.join(input_dir,
infile)))
def test_sufar4_with_infile(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile,
input_dir_path=input_dir,
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath(os.path.join('.', 'magic_measurements.txt')))
with open(outfile, 'r') as ofile:
lines = ofile.readlines()
self.assertEqual(292, len(lines))
def test_sufar4_succeed_data_model3(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile,
input_dir_path=input_dir)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile),
os.path.realpath(os.path.join('.', 'measurements.txt')))
with open(outfile, 'r') as ofile:
lines = ofile.readlines()
self.assertEqual(292, len(lines))
self.assertEqual('measurements', lines[0].split('\t')[1].strip())
con = cb.Contribution(WD)
self.assertEqual(sorted(con.tables),
sorted(['measurements', 'specimens',
'samples', 'sites']))
def test_sufar4_fail_option4(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, error_message = convert.sufar4(infile,
input_dir_path=input_dir,
sample_naming_con='4',
data_model_num=2)
self.assertFalse(program_ran)
self.assertEqual(error_message, "option [4] must be in form 4-Z where Z is an integer")
def test_sufar4_succeed_option4(self):
input_dir = os.path.join(WD, 'data_files', 'convert_2_magic',
'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
ofile = 'my_magic_measurements.txt'
program_ran, outfile = convert.sufar4(infile,
meas_output=ofile,
input_dir_path=input_dir,
sample_naming_con='4-2',
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath(os.path.join('.', ofile)))
def test_sufar4_with_options(self):
input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'sufar_asc_magic')
infile = 'sufar4-asc_magic_example.txt'
program_ran, outfile = convert.sufar4(infile, meas_output='my_magic_measurements.txt',
aniso_output="my_rmag_anisotropy.txt",
specnum=2, locname="Here", instrument="INST",
static_15_position_mode=True, input_dir_path=input_dir,
sample_naming_con='5',
data_model_num=2)
self.assertTrue(program_ran)
self.assertEqual(outfile, os.path.realpath(os.path.join('.', 'my_magic_measurements.txt')))
class TestTdtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'tdt_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_success(self):
res, outfile = convert.tdt(self.input_dir)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "measurements.txt"))
def test_with_options(self):
res, outfile = convert.tdt(self.input_dir, meas_file_name="custom.out", location="here",
user="me", samp_name_con=2, samp_name_chars=1, site_name_con=2,
site_name_chars=1, volume=15., lab_inc=-90)
self.assertTrue(res)
self.assertEqual(outfile, os.path.join(self.input_dir, "custom.out"))
df = cb.MagicDataFrame(os.path.join(self.input_dir, "samples.txt"))
self.assertEqual("MG", df.df["site"].values[0])
self.assertEqual("MGH", df.df["sample"].values[0])
class TestUtrechtMagic(unittest.TestCase):
def setUp(self):
os.chdir(WD)
self.input_dir = os.path.join(WD, 'data_files',
'convert_2_magic', 'utrecht_magic')
def tearDown(self):
filelist = ['measurements.txt', 'specimens.txt',
'samples.txt', 'sites.txt', 'locations.txt', 'custom.out']
pmag.remove_files(filelist, WD)
#filelist = ['specimens.txt', 'samples.txt', 'sites.txt',
# 'locations.txt', 'custom_specimens.txt', 'measurements.txt']
pmag.remove_files(filelist, '.')
pmag.remove_files(filelist, os.path.join(WD, 'data_files'))
os.chdir(WD)
def test_utrecht_with_no_files(self):
with self.assertRaises(TypeError):
convert.utrecht()
def test_utrecht_success(self):
options = {'input_dir_path': self.input_dir, 'mag_file': 'Utrecht_Example.af'}
program_ran, outfile = convert.utrecht(**options)
self.assertTrue(program_ran)
self.assertEqual(os.path.realpath(outfile), os.path.join(WD, 'measurements.txt'))
meas_df = cb.MagicDataFrame(outfile)
self.assertIn('sequence', meas_df.df.columns)
| 44.704113
| 209
| 0.585392
|
00108bf215fd6861d561f98ece61b214640d13ac
| 6,889
|
py
|
Python
|
source/todo2.py
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
source/todo2.py
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
source/todo2.py
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'todo2.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(551, 475)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icon/iconfinder_document-03_1622833.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet("background-color: rgb(0, 0, 0);")
MainWindow.setIconSize(QtCore.QSize(25, 25))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.addbtn = QtWidgets.QPushButton(self.centralwidget)
self.addbtn.setGeometry(QtCore.QRect(0, 50, 181, 91))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.addbtn.setFont(font)
self.addbtn.setStyleSheet("QPushButton {\n"
" border: 1px solid gray;\n"
" color: rgb(255, 255, 255);\n"
"\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #000000, stop: 1 #323232);\n"
"}")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icon/iconfinder_icon-33-clipboard-add_315154.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.addbtn.setIcon(icon1)
self.addbtn.setIconSize(QtCore.QSize(25, 25))
self.addbtn.setObjectName("addbtn")
self.deletebtn = QtWidgets.QPushButton(self.centralwidget)
self.deletebtn.setGeometry(QtCore.QRect(0, 130, 181, 91))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.deletebtn.setFont(font)
self.deletebtn.setStyleSheet("QPushButton {\n"
" border: 1px solid gray;\n"
" color: rgb(255, 255, 255);\n"
"\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #000000, stop: 1 #323232);\n"
"}")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icon/iconfinder_draw-08_725558.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.deletebtn.setIcon(icon2)
self.deletebtn.setIconSize(QtCore.QSize(25, 25))
self.deletebtn.setObjectName("deletebtn")
self.clearallbtn = QtWidgets.QPushButton(self.centralwidget)
self.clearallbtn.setGeometry(QtCore.QRect(0, 210, 181, 91))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.clearallbtn.setFont(font)
self.clearallbtn.setStyleSheet("QPushButton {\n"
" border: 1px solid gray;\n"
" color: rgb(255, 255, 255);\n"
"\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #000000, stop: 1 #323232);\n"
"}")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/icon/iconfinder_trash_4696642.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.clearallbtn.setIcon(icon3)
self.clearallbtn.setIconSize(QtCore.QSize(25, 25))
self.clearallbtn.setObjectName("clearallbtn")
self.savebtn = QtWidgets.QPushButton(self.centralwidget)
self.savebtn.setGeometry(QtCore.QRect(0, 300, 181, 91))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.savebtn.setFont(font)
self.savebtn.setStyleSheet("QPushButton {\n"
" border: 1px solid gray;\n"
" color: rgb(255, 255, 255);\n"
"\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #000000, stop: 1 #323232);\n"
"}")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/icon/iconfinder_simpline_53_2305609.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.savebtn.setIcon(icon4)
self.savebtn.setIconSize(QtCore.QSize(25, 25))
self.savebtn.setObjectName("savebtn")
self.loadbtn = QtWidgets.QPushButton(self.centralwidget)
self.loadbtn.setGeometry(QtCore.QRect(0, 390, 181, 91))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.loadbtn.setFont(font)
self.loadbtn.setStyleSheet("QPushButton {\n"
" border: 1px solid gray;\n"
" color: rgb(255, 255, 255);\n"
"\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #000000, stop: 1 #323232);\n"
"}")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/icon/iconfinder_Open_1493293.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.loadbtn.setIcon(icon5)
self.loadbtn.setIconSize(QtCore.QSize(25, 25))
self.loadbtn.setObjectName("loadbtn")
self.todolist = QtWidgets.QListWidget(self.centralwidget)
self.todolist.setGeometry(QtCore.QRect(180, 0, 371, 481))
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(12)
self.todolist.setFont(font)
self.todolist.setStyleSheet("\n"
"\n"
"QListWidget::item {\n"
" color:white;\n"
"\n"
"}\n"
"\n"
"QListWidget::item:selected{\n"
" color:white;\n"
" \n"
" background-color: rgb(34, 104, 51);\n"
"\n"
"}")
self.todolist.setObjectName("todolist")
self.addlist = QtWidgets.QLineEdit(self.centralwidget)
self.addlist.setGeometry(QtCore.QRect(0, 0, 181, 51))
self.addlist.setStyleSheet("border: 1px solid gray;\n"
"color: rgb(255, 255, 255);\n"
"background-color: rgb(30,30,30);")
self.addlist.setObjectName("addlist")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "ToDo List"))
self.addbtn.setText(_translate("MainWindow", "加入項目"))
self.deletebtn.setText(_translate("MainWindow", "清除所選項目"))
self.clearallbtn.setText(_translate("MainWindow", "清除全部項目"))
self.savebtn.setText(_translate("MainWindow", "存取清單\n"
"(Ctrl+S)"))
self.loadbtn.setText(_translate("MainWindow", "讀取清單\n"
"(Ctrl+F)"))
import image_rc
| 38.920904
| 129
| 0.631441
|
6c3ceb8cf1cc0d98c5827893f858e3cfa7c4ce2a
| 3,833
|
py
|
Python
|
roles/lib_openshift/src/class/oc_scale.py
|
shgriffi/openshift-ansible
|
6313f519307cf50055589c3876d8bec398bbc4d4
|
[
"Apache-2.0"
] | 164
|
2015-07-29T17:35:04.000Z
|
2021-12-16T16:38:04.000Z
|
roles/lib_openshift/src/class/oc_scale.py
|
shgriffi/openshift-ansible
|
6313f519307cf50055589c3876d8bec398bbc4d4
|
[
"Apache-2.0"
] | 3,634
|
2015-06-09T13:49:15.000Z
|
2022-03-23T20:55:44.000Z
|
roles/lib_openshift/src/class/oc_scale.py
|
shgriffi/openshift-ansible
|
6313f519307cf50055589c3876d8bec398bbc4d4
|
[
"Apache-2.0"
] | 250
|
2015-06-08T19:53:11.000Z
|
2022-03-01T04:51:23.000Z
|
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
resource_name,
namespace,
replicas,
kind,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.replicas = replicas
self.name = resource_name
self._resource = None
@property
def resource(self):
''' property function for resource var '''
if not self._resource:
self.get()
return self._resource
@resource.setter
def resource(self, data):
''' setter function for resource var '''
self._resource = data
def get(self):
'''return replicas information '''
vol = self._get(self.kind, self.name)
if vol['returncode'] == 0:
if self.kind == 'dc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = DeploymentConfig(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
if self.kind == 'rc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = ReplicationController(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
return vol
def put(self):
'''update replicas into dc '''
self.resource.update_replicas(self.replicas)
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
def needs_update(self):
''' verify whether an update is needed '''
return self.resource.needs_update_replicas(self.replicas)
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''perform the idempotent ansible logic'''
oc_scale = OCScale(params['name'],
params['namespace'],
params['replicas'],
params['kind'],
params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'result': api_rval['results'], 'state': 'list'} # noqa: E501
elif state == 'present':
########
# Update
########
if oc_scale.needs_update():
if check_mode:
return {'changed': True, 'result': 'CHECK_MODE: Would have updated.'} # noqa: E501
api_rval = oc_scale.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'changed': False, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'failed': True, 'msg': 'Unknown state passed. [{}]'.format(state)}
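# Illustrative usage sketch (not part of the generated module): how the
# idempotent entry point above might be driven. Every value below is a
# placeholder; run_ansible expects exactly the keys shown here.
def _example_scale_dc():
    example_params = {'name': 'router',            # hypothetical dc name
                      'namespace': 'default',
                      'replicas': 3,
                      'kind': 'dc',
                      'kubeconfig': '/etc/origin/master/admin.kubeconfig',
                      'state': 'present',
                      'debug': False}
    # check_mode=True only reports whether a change would be made
    return OCScale.run_ansible(example_params, check_mode=True)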
| 35.165138
| 105
| 0.535351
|
f69b1523343f2156fd65e3d66d3463d85a8cbdf4
| 1,705
|
py
|
Python
|
gmaltapi/server.py
|
gmalt/api
|
c3d35c87564d21f8b7cd061923c155073b467d3d
|
[
"MIT"
] | null | null | null |
gmaltapi/server.py
|
gmalt/api
|
c3d35c87564d21f8b7cd061923c155073b467d3d
|
[
"MIT"
] | null | null | null |
gmaltapi/server.py
|
gmalt/api
|
c3d35c87564d21f8b7cd061923c155073b467d3d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# (c) 2017 Jonathan Bouzekri
#
# This file is part of the gmalt application
#
# MIT License :
# https://raw.githubusercontent.com/gmalt/api/master/LICENSE.txt
""" Provide a gevent server to serve gmalt API """
from gevent.pywsgi import WSGIServer
class GmaltServer(WSGIServer):
""" A gevent webserver API to request elevation data
:param handler: the handler instance to load elevation data from
latitude and longitude
:type handler: :class:`gmaltapi.handlers.files.Handler` or any class
implementing the `get_altitude` method
    :param str host: host or IP address to bind to
    :param int port: port to bind to
:param str cors: optional CORS domains to enable CORS headers
"""
spec = {
'handler': 'string(default="file")',
'host': 'string(default="localhost")',
'port': 'integer(default=8088)',
'cors': 'string(default=None)',
'pool_size': 'integer(default=None)'
}
def __init__(self, handler, host, port, cors=None, **kwargs):
        pool_size = kwargs.pop('pool_size', None) or 'default'
super(GmaltServer, self).__init__((host, port),
self._build_wsgi(handler, cors),
spawn=pool_size, **kwargs)
def _build_wsgi(self, handler, cors):
if cors:
from wsgicors import CORS
handler = CORS(handler, methods="GET, OPTIONS, POST", origin=cors)
return handler
def serve_forever(self, stop_timeout=None):
""" Start the server """
print('Serving on %s:%d' % self.address)
super(GmaltServer, self).serve_forever(stop_timeout=stop_timeout)
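# Minimal usage sketch (illustrative, not part of the library): serve a
# hypothetical WSGI handler on the default host/port; a real deployment would
# pass the configured gmalt elevation handler instead of echo_handler.
if __name__ == '__main__':
    def echo_handler(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'42']
    # pool_size=None lets the server fall back to gevent's default pool
    GmaltServer(echo_handler, 'localhost', 8088, pool_size=None).serve_forever()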
| 33.431373
| 78
| 0.62346
|
f0c6a443bc4623e19ce3a7ff0f1ed990870734da
| 3,053
|
py
|
Python
|
imagepy/menus/Plugins/StackReg/stackreg_plgs.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | 2
|
2019-08-15T06:19:18.000Z
|
2021-10-09T15:51:57.000Z
|
imagepy/menus/Plugins/StackReg/stackreg_plgs.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | null | null | null |
imagepy/menus/Plugins/StackReg/stackreg_plgs.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | null | null | null |
from imagepy.core.engine import Filter, Simple
from imagepy import IPy
from pystackreg import StackReg
import numpy as np
import pandas as pd
from skimage import transform as tf
import scipy.ndimage as ndimg
from imagepy.core.manager import TableManager
class Register(Simple):
title = 'Stack Register'
note = ['8-bit', '16-bit', 'int', 'float', 'stack']
para = {'trans':'RIGID_BODY', 'ref':'previous', 'tab':False, 'new':'Inplace', 'diag':0, 'sigma':0}
view = [(list, 'trans', ['TRANSLATION', 'RIGID_BODY', 'SCALED_ROTATION', 'AFFINE', 'BILINEAR'], str, 'transform', ''),
(list, 'ref', ['previous', 'first', 'mean'], str, 'reference', ''),
(list, 'new', ['Inplace', 'New', 'None'], str, 'image', ''),
(int, 'diag', (0, 2048), 0, 'diagonal', 'scale'),
(float, 'sigma', (0,30), 1, 'sigma', 'blur'),
(bool, 'tab', 'show table')]
def run(self, ips, imgs, para = None):
k = para['diag']/np.sqrt((np.array(ips.img.shape)**2).sum())
size = tuple((np.array(ips.img.shape)*k).astype(np.int16))
IPy.set_info('down sample...')
news = []
for img in imgs:
if k!=0: img = tf.resize(img, size)
if para['sigma']!=0:
img = ndimg.gaussian_filter(img, para['sigma'])
news.append(img)
IPy.set_info('register...')
sr = StackReg(eval('StackReg.%s'%para['trans']))
sr.register_stack(np.array(news), reference=para['ref'])
mats = sr._tmats.reshape((sr._tmats.shape[0],-1))
if k!=0: mats[:,[0,1,3,4,6,7]] *= k
if k!=0: mats[:,[0,1,2,3,4,5]] /= k
if para['tab']: IPy.show_table(pd.DataFrame(
mats, columns=['A%d'%(i+1) for i in range(mats.shape[1])]), title='%s-Tmats'%ips.title)
if para['new'] == 'None': return
IPy.set_info('transform...')
for i in range(sr._tmats.shape[0]):
tform = tf.ProjectiveTransform(matrix=sr._tmats[i])
img = tf.warp(imgs[i], tform)
img -= imgs[i].min(); img *= imgs[i].max() - imgs[i].min()
if para['new'] == 'Inplace': imgs[i][:] = img
if para['new'] == 'New': news[i] = img.astype(ips.img.dtype)
self.progress(i, len(imgs))
if para['new'] == 'New': IPy.show_img(news, '%s-reg'%ips.title)
class Transform(Simple):
title = 'Register By Mats'
note = ['all']
para = {'mat':None, 'new':True}
    view = [('tab', 'mat', 'transform', 'matrix'),
(bool, 'new', 'new image')]
def run(self, ips, imgs, para = None):
mats = TableManager.get(para['mat']).data.values
if len(imgs) != len(mats):
            IPy.alert('image stack must have the same length as transform mats!')
return
newimgs = []
img = np.zeros_like(ips.img, dtype=np.float64)
for i in range(len(mats)):
tform = tf.ProjectiveTransform(matrix=mats[i].reshape((3,3)))
if imgs[i].ndim==2:
img[:] = tf.warp(imgs[i], tform)
else:
for c in range(img.shape[2]):
img[:,:,c] = tf.warp(imgs[i][:,:,c], tform)
img -= imgs[i].min(); img *= imgs[i].max() - imgs[i].min()
if para['new']: newimgs.append(img.astype(ips.img.dtype))
else: imgs[i] = img
self.progress(i, len(mats))
if para['new']: IPy.show_img(newimgs, '%s-trans'%ips.title)
plgs = [Register, Transform]
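# Aside (illustrative only): stripped of the ImagePy plumbing above, the core
# registration amounts to the pystackreg calls below; 'stack' stands for a
# hypothetical (n, h, w) numpy array of frames.
def _pystackreg_sketch(stack, trans='RIGID_BODY', ref='previous'):
    sr = StackReg(getattr(StackReg, trans))
    tmats = sr.register_stack(stack, reference=ref)  # one matrix per frame
    aligned = sr.transform_stack(stack)              # apply stored matrices
    return tmats, aligned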
| 36.783133
| 120
| 0.612512
|
c176790393a1db4945c3d6a00cb554486ac76838
| 13,102
|
py
|
Python
|
cf_api/deploy_space.py
|
hsdp/python-cf-api
|
13fc605e2ea3b5c09cc8a556c58e8c36ae290c8c
|
[
"Apache-2.0"
] | 20
|
2018-01-19T20:19:02.000Z
|
2020-06-09T08:45:40.000Z
|
cf_api/deploy_space.py
|
hsdp/python-cf-api
|
13fc605e2ea3b5c09cc8a556c58e8c36ae290c8c
|
[
"Apache-2.0"
] | 4
|
2018-01-20T00:24:27.000Z
|
2020-03-16T01:26:27.000Z
|
cf_api/deploy_space.py
|
hsdp/python-cf-api
|
13fc605e2ea3b5c09cc8a556c58e8c36ae290c8c
|
[
"Apache-2.0"
] | 3
|
2020-02-19T22:56:50.000Z
|
2021-05-12T19:38:33.000Z
|
from __future__ import print_function
import json
from . import deploy_manifest
from . import deploy_service
from . import exceptions as exc
class Space(object):
"""This class provides support for working with a particular space. It
mainly provides convenience functions for deploying, fetching, and
destroying the space, apps, and services.
"""
_org = None
_space = None
_space_name = None
_debug = False
def __init__(self,
cc,
org_name=None,
org_guid=None,
space_name=None,
space_guid=None,
is_debug=None):
self.cc = cc
if space_guid:
self.set_space_guid(space_guid)
elif org_guid:
self.set_org_guid(org_guid)
elif org_name and space_name:
self.set_org(org_name).set_space(space_name)
elif org_name:
self.set_org(org_name)
if is_debug is not None:
self.set_debug(is_debug)
@property
def space(self):
"""Returns the currently set space
"""
if not self._space:
if not self._space_name:
raise exc.InvalidStateException('Space is not set.', 500)
else:
self.set_space(self._space_name)
return self._space
@property
def org(self):
"""Returns the currently set org
"""
if not self._org:
raise exc.InvalidStateException('Org is not set.', 500)
return self._org
def set_org(self, org_name):
"""Sets the organization name for this space
Args:
org_name (str): name of the organization
Returns:
space (Space): self
"""
res = self.cc.organizations().get_by_name(org_name)
self._org = res.resource
if self._org is None:
raise exc.InvalidStateException('Org not found.', 404)
return self
def set_space(self, space_name):
"""Sets the space name
Args:
space_name (str): name of the space
Returns:
space (Space): self
"""
if not self._org:
raise exc.InvalidStateException(
'Org is required to set the space name.', 500)
res = self.cc.request(self._org.spaces_url).get_by_name(space_name)
self._space = res.resource
self._space_name = space_name
return self
def set_org_guid(self, org_guid):
"""Sets and loads the organization by the given GUID
"""
res = self.cc.organizations(org_guid).get()
self._org = res.resource
return self
def set_space_guid(self, space_guid):
"""Sets the GUID of the space to be used in this deployment
Args:
space_guid (str): guid of the space
Returns:
self (Space)
"""
res = self.cc.spaces(space_guid).get()
self._space = res.resource
res = self.cc.request(self._space.organization_url).get()
self._org = res.resource
return self
def set_debug(self, debug):
"""Sets a debug flag on whether this client should print debug messages
Args:
debug (bool)
Returns:
self (Space)
"""
self._debug = debug
return self
def request(self, *urls):
"""Creates a request object with a base url (i.e. /v2/spaces/<id>)
"""
return self.cc.request(self._space['metadata']['url'], *urls)
def create(self, **params):
"""Creates the space
Keyword Args:
params: HTTP body args for the space create endpoint
"""
if not self._space:
res = self.cc.spaces().set_params(
name=self._space_name,
organization_guid=self._org.guid,
**params
).post()
self._space = res.resource
return self._space
def destroy(self, destroy_routes=False):
"""Destroys the space, and, optionally, any residual routes existing in
the space.
Keyword Args:
            destroy_routes (bool): indicates whether to destroy routes
"""
if not self._space:
raise exc.InvalidStateException(
'No space specified. Can\'t destroy.', 500)
route_results = []
if destroy_routes:
for r in self.get_routes():
res = self.cc.routes(r.guid).delete()
route_results.append(res.data)
res = self.cc.spaces(self._space.guid).delete()
self._space = None
return res.resource, route_results
def get_deploy_manifest(self, manifest_filename):
"""Parses the manifest deployment list and sets the org and space to be
used in deployment.
"""
self._assert_space()
app_deploys = deploy_manifest.Deploy\
.parse_manifest(manifest_filename, self.cc)
return [d.set_org_and_space_dicts(self._org, self._space)
.set_debug(self._debug) for d in app_deploys]
def get_deploy_service(self):
"""Returns a service deployment client with the org and space to be
used in deployment.
"""
self._assert_space()
return deploy_service.DeployService(self.cc)\
.set_debug(self._debug)\
.set_org_and_space_dicts(self._org, self._space)
def deploy_manifest(self, manifest_filename, **kwargs):
"""Deploys all apps in the given app manifest into this space.
Args:
manifest_filename (str): app manifest filename to be deployed
"""
return [m.push(**kwargs)
for m in self.get_deploy_manifest(manifest_filename)]
def wait_manifest(self, manifest_filename, interval=20, timeout=300,
tailing=False):
"""Waits for an app to start given a manifest filename.
Args:
manifest_filename (str): app manifest filename to be waited on
Keyword Args:
interval (int): how often to check if the app has started
timeout (int): how long to wait for the app to start
"""
app_deploys = self.get_deploy_manifest(manifest_filename)
deploy_manifest.Deploy.wait_for_apps_start(
app_deploys, interval, timeout, tailing=tailing)
def destroy_manifest(self, manifest_filename, destroy_routes=False):
"""Destroys all apps in the given app manifest in this space.
Args:
manifest_filename (str): app manifest filename to be destroyed
Keyword Args:
destroy_routes (bool): indicates whether to destroy routes
"""
return [m.destroy(destroy_routes)
for m in self.get_deploy_manifest(manifest_filename)]
def get_blue_green(self, manifest_filename, interval=20, timeout=300,
tailing=None, **kwargs):
"""Parses the manifest and searches for ``app_name``, returning an
instance of the BlueGreen deployer object.
Args:
manifest_filename (str)
interval (int)
timeout (int)
tailing (bool)
**kwargs (dict): are passed along to the BlueGreen constructor
Returns:
list[cf_api.deploy_blue_green.BlueGreen]
"""
from .deploy_blue_green import BlueGreen
if tailing is not None:
kwargs['verbose'] = tailing
elif 'verbose' not in kwargs:
kwargs['verbose'] = self._debug
kwargs['wait_kwargs'] = {'interval': interval, 'timeout': timeout}
return BlueGreen.parse_manifest(self, manifest_filename, **kwargs)
def deploy_blue_green(self, manifest_filename, **kwargs):
"""Deploys the application from the given manifest using the
BlueGreen deployment strategy
Args:
manifest_filename (str)
**kwargs (dict): are passed along to self.get_blue_green
Returns:
list
"""
return [m.deploy_app()
for m in self.get_blue_green(manifest_filename, **kwargs)]
def wait_blue_green(self, manifest_filename, **kwargs):
"""Waits for the application to start, from the given manifest using
the BlueGreen deployment strategy
Args:
manifest_filename (str)
**kwargs (dict): are passed along to self.get_blue_green
Returns:
list
"""
return [m.wait_and_cleanup()
for m in self.get_blue_green(manifest_filename, **kwargs)]
def get_service_instance_by_name(self, name):
"""Searches the space for a service instance with the name
"""
res = self.cc.request(self._space.service_instances_url)\
.get_by_name(name)
return res.resource
def get_app_by_name(self, name):
"""Searches the space for an app with the name
"""
res = self.cc.request(self._space.apps_url)\
.get_by_name(name)
return res.resource
def get_routes(self, host=None):
"""Searches the space for routes
"""
req = self.cc.spaces(self._space.guid, 'routes')
res = req.get_by_name(host, 'host') if host else req.get()
return res.resources
def _assert_space(self):
if not self._space:
raise exc.InvalidStateException('No space is set.', 500)
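# Usage sketch (illustrative, never executed on import): authenticate, target
# an org/space with the class above and push an app manifest. The endpoint,
# credentials, org and space names below are placeholders.
def _example_deploy(manifest_filename='manifest.yml'):
    import cf_api
    cc = cf_api.new_cloud_controller(
        'https://api.example.com',
        username='admin',
        password='secret',
        client_id='cf',
        client_secret='',
        verify_ssl=True,
    )
    space = Space(cc, org_name='my-org', space_name='dev', is_debug=True)
    space.deploy_manifest(manifest_filename)
    space.wait_manifest(manifest_filename, interval=20, timeout=300)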
if '__main__' == __name__:
import argparse
import __init__ as cf_api
from getpass import getpass
def main():
args = argparse.ArgumentParser(
description='This tool performs Cloud Controller API requests '
'on behalf of a user in a given org/space. It may '
'be used to look up space specific resources such '
'as apps and services. It returns only the raw '
'JSON response from the Cloud Controller.')
args.add_argument(
'--cloud-controller', dest='cloud_controller', required=True,
help='The Cloud Controller API endpoint '
'(excluding leading slashes)')
args.add_argument(
'-u', '--user', dest='user', required=True,
help='The user used to authenticate. This may be omitted '
'if --client-id and --client-secret have sufficient '
'authorization to perform the desired request without a '
'user\'s permission')
args.add_argument(
'-o', '--org', dest='org', required=True,
help='The organization to be accessed')
args.add_argument(
'-s', '--space', dest='space', required=True,
help='The space to be accessed')
args.add_argument(
'--client-id', dest='client_id', default='cf',
help='Used to set a custom client ID')
args.add_argument(
'--client-secret', dest='client_secret', default='',
help='Secret corresponding to --client-id')
args.add_argument(
'--skip-ssl', dest='skip_ssl', action='store_true',
help='Indicates to skip SSL cert verification.')
args.add_argument(
'--show-org', dest='show_org', action='store_true',
help='Indicates to show the organization set in --org/-o')
args.add_argument(
'--list-all', dest='list_all', action='store_true',
help='Indicates to get all pages of resources matching the given '
'URL')
args.add_argument(
'--pretty', dest='pretty_print', action='store_true',
help='Indicates to pretty-print the resulting JSON')
args.add_argument(
'url', nargs='?',
help='The URL to be accessed relative to the space URL. This value'
' will be appended to the space URL indicated by -o and -s '
'(i.e. /spaces/<space_guid>/<url>)')
args = args.parse_args()
cc = cf_api.new_cloud_controller(
args.cloud_controller,
username=args.user,
password=getpass().strip() if args.user is not None else None,
client_id=args.client_id,
client_secret=args.client_secret,
verify_ssl=not args.skip_ssl,
init_doppler=True,
)
space = Space(
cc,
org_name=args.org,
space_name=args.space,
is_debug=True
)
dumps_kwargs = {}
if args.pretty_print:
dumps_kwargs['indent'] = 4
if args.url:
req = space.request(args.url)
if not args.list_all:
return print(req.get().text)
else:
res = cc.get_all_resources(req)
elif args.show_org:
res = space.org
else:
res = space.space
return print(json.dumps(res, **dumps_kwargs))
main()
| 33.594872
| 79
| 0.580675
|
aabd465b870b08d29074ad787f13c2f55c6db4bc
| 960
|
py
|
Python
|
run.py
|
thanhbok26b/mujoco-rewards-landscape-visualization
|
c1a95b38a0ea03468bbbb7ce013eff37ccd67101
|
[
"MIT"
] | null | null | null |
run.py
|
thanhbok26b/mujoco-rewards-landscape-visualization
|
c1a95b38a0ea03468bbbb7ce013eff37ccd67101
|
[
"MIT"
] | null | null | null |
run.py
|
thanhbok26b/mujoco-rewards-landscape-visualization
|
c1a95b38a0ea03468bbbb7ce013eff37ccd67101
|
[
"MIT"
] | null | null | null |
import os
import yaml
import pickle
from ars import ars
from mujoco_parallel import WorkerManager
from mujoco_parallel import MujocoParallel, benchmarks
results = []
def callback(res):
global results
results.append(res)
def main():
global results
config = yaml.load(open('config.yaml').read())
instance = config['instance']
benchmark = benchmarks[instance]
if not os.path.exists('data'):
os.mkdir('data')
if not os.path.exists('data/%s' % instance):
os.mkdir('data/%s' % instance)
# # Start workers
# wm = WorkerManager()
# wm.start_redis()
# wm.create_workers()
# Start master
mp = MujocoParallel(benchmark)
for i in range(config['repeat']):
results = []
ars(mp, config, callback)
obj = pickle.dumps(results, protocol=4)
with open('data/%s/%d.pkl' % (instance, i), 'wb') as fp:
fp.write(obj)
if __name__ == '__main__':
main()
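# Expected config.yaml layout (illustrative): main() reads 'instance' (which
# must name an entry in mujoco_parallel.benchmarks) and 'repeat'; any extra
# fields consumed by ars() sit alongside them, e.g.
#   instance: HalfCheetah-v2
#   repeat: 5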
| 22.325581
| 64
| 0.626042
|
3c7f90d2f04aa6d69159127b401effc466731f38
| 1,573
|
py
|
Python
|
back-end/Mechanisms/Blob/MinioBlobMechanism.py
|
cmillani/SPaaS
|
5c37f6f6583411c856e2cefa9e94971c472f30b5
|
[
"MIT"
] | null | null | null |
back-end/Mechanisms/Blob/MinioBlobMechanism.py
|
cmillani/SPaaS
|
5c37f6f6583411c856e2cefa9e94971c472f30b5
|
[
"MIT"
] | null | null | null |
back-end/Mechanisms/Blob/MinioBlobMechanism.py
|
cmillani/SPaaS
|
5c37f6f6583411c856e2cefa9e94971c472f30b5
|
[
"MIT"
] | null | null | null |
from minio import Minio
from minio.error import ResponseError
from .BlobConfiguration import *
import os
class BlobFile:
def __init__(self, name):
self.name = name
class MinioBlobMechanism:
def __init__(self):
self.minioClient = Minio(os.environ['MINIO_ENDPOINT'],
access_key=os.environ['MINIO_ACCESS_KEY'],
secret_key=os.environ['MINIO_SECRET_KEY'],
secure=False)
if not self.minioClient.bucket_exists(DataBlob):
self.minioClient.make_bucket(DataBlob)
if not self.minioClient.bucket_exists(ToolsBlob):
self.minioClient.make_bucket(ToolsBlob)
if not self.minioClient.bucket_exists(ResultsBlob):
self.minioClient.make_bucket(ResultsBlob)
def download_blob(self, container_name, blob_name):
return self.minioClient.get_object(container_name, blob_name)
def get_blob_to_path(self, container_name, blob_name, file_path):
self.minioClient.fget_object(container_name, blob_name, file_path)
def create_blob_from_path(self, container_name, blob_name, file_path):
self.minioClient.fput_object(container_name, blob_name, file_path)
def list_blobs(self, container_name):
blobObjects = self.minioClient.list_objects(container_name)
objects = [BlobFile(blobObject.object_name) for blobObject in blobObjects]
return objects
def delete_blob(self, container_name, blob_name):
self.minioClient.remove_object(container_name, blob_name)
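# Usage sketch (illustrative): assumes MINIO_ENDPOINT, MINIO_ACCESS_KEY and
# MINIO_SECRET_KEY are set in the environment; the bucket constant comes from
# BlobConfiguration, the blob and file names below are placeholders.
def _example_roundtrip():
    blobs = MinioBlobMechanism()
    blobs.create_blob_from_path(DataBlob, 'input.csv', '/tmp/input.csv')
    print([blob.name for blob in blobs.list_blobs(DataBlob)])
    blobs.get_blob_to_path(DataBlob, 'input.csv', '/tmp/input_copy.csv')
    blobs.delete_blob(DataBlob, 'input.csv')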
| 39.325
| 82
| 0.702479
|
29fc30be65d4dab10be644481b5d420770dbcdf8
| 10,127
|
py
|
Python
|
example/ssd/symbol/legacy_vgg16_ssd_300.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 399
|
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
example/ssd/symbol/legacy_vgg16_ssd_300.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 187
|
2018-03-16T23:44:43.000Z
|
2021-12-14T21:19:54.000Z
|
example/ssd/symbol/legacy_vgg16_ssd_300.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 107
|
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from common import legacy_conv_act_layer
from common import multibox_layer
def get_symbol_train(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
Single-shot multi-box detection with VGG 16 layers ConvNet
    This is a modified version, with fc6/fc7 layers replaced by conv layers,
    and the network is slightly smaller than the original VGG 16 network
This is a training network with losses
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name="label")
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), \
pooling_convention="full", name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(3, 3), stride=(1, 1),
pad=(1,1), name="pool5")
# group 6
conv6 = mx.symbol.Convolution(
data=pool5, kernel=(3, 3), pad=(6, 6), dilate=(6, 6),
num_filter=1024, name="conv6")
relu6 = mx.symbol.Activation(data=conv6, act_type="relu", name="relu6")
# drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
conv7 = mx.symbol.Convolution(
data=relu6, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="conv7")
relu7 = mx.symbol.Activation(data=conv7, act_type="relu", name="relu7")
# drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
### ssd extra layers ###
conv8_1, relu8_1 = legacy_conv_act_layer(relu7, "8_1", 256, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv8_2, relu8_2 = legacy_conv_act_layer(relu8_1, "8_2", 512, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv9_1, relu9_1 = legacy_conv_act_layer(relu8_2, "9_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv9_2, relu9_2 = legacy_conv_act_layer(relu9_1, "9_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv10_1, relu10_1 = legacy_conv_act_layer(relu9_2, "10_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv10_2, relu10_2 = legacy_conv_act_layer(relu10_1, "10_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_1, relu11_1 = legacy_conv_act_layer(relu10_2, "11_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_2, relu11_2 = legacy_conv_act_layer(relu11_1, "11_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
# specific parameters for VGG16 network
from_layers = [relu4_3, relu7, relu8_2, relu9_2, relu10_2, relu11_2]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1]
steps = [ x / 300.0 for x in [8, 16, 32, 64, 100, 300]]
num_channels = [512]
loc_preds, cls_preds, anchor_boxes = multibox_layer(from_layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_channels, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
Single-shot multi-box detection with VGG 16 layers ConvNet
    This is a modified version, with fc6/fc7 layers replaced by conv layers,
    and the network is slightly smaller than the original VGG 16 network
This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
cls_preds = net.get_internals()["multibox_cls_pred_output"]
loc_preds = net.get_internals()["multibox_loc_pred_output"]
anchor_boxes = net.get_internals()["multibox_anchors_output"]
cls_prob = mx.symbol.SoftmaxActivation(data=cls_preds, mode='channel', \
name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
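# Quick sanity-check sketch (illustrative): build both symbols with the default
# arguments documented above and list their outputs.
if __name__ == '__main__':
    train_sym = get_symbol_train(num_classes=20)
    deploy_sym = get_symbol(num_classes=20)
    print(train_sym.list_outputs())
    print(deploy_sym.list_outputs())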
| 48.454545
| 96
| 0.663079
|
9990339ad0266bf222265cf46651bfb024921aed
| 265
|
py
|
Python
|
frappe_training/frappe_training/doctype/salary_detail/salary_detail.py
|
sivaranjanipalanivel/training
|
b177c56a319c07dc3467ce3113e332ecee9b81fa
|
[
"MIT"
] | null | null | null |
frappe_training/frappe_training/doctype/salary_detail/salary_detail.py
|
sivaranjanipalanivel/training
|
b177c56a319c07dc3467ce3113e332ecee9b81fa
|
[
"MIT"
] | null | null | null |
frappe_training/frappe_training/doctype/salary_detail/salary_detail.py
|
sivaranjanipalanivel/training
|
b177c56a319c07dc3467ce3113e332ecee9b81fa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, valiantsystems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class SalaryDetail(Document):
pass
| 24.090909
| 53
| 0.781132
|
7abf12cb436fed17e9bd170452989d83f41f56eb
| 395
|
py
|
Python
|
QACTPBeeBroker/sub.py
|
kmmao/QACTPBeeBroker
|
2a39a06a1912aec041c45dc6577d017e5e637e34
|
[
"MIT"
] | 16
|
2019-07-03T05:56:27.000Z
|
2022-03-30T10:15:43.000Z
|
QACTPBeeBroker/sub.py
|
kmmao/QACTPBeeBroker
|
2a39a06a1912aec041c45dc6577d017e5e637e34
|
[
"MIT"
] | 3
|
2019-09-14T05:33:05.000Z
|
2020-07-16T01:10:52.000Z
|
QACTPBeeBroker/sub.py
|
kmmao/QACTPBeeBroker
|
2a39a06a1912aec041c45dc6577d017e5e637e34
|
[
"MIT"
] | 13
|
2019-07-07T18:16:07.000Z
|
2022-03-26T15:59:33.000Z
|
from QAPUBSUB.consumer import subscriber_routing
from QACTPBeeBroker.setting import eventmq_ip
import click
@click.command()
@click.option('--code', default='rb1910')
def sub(code):
x = subscriber_routing(host=eventmq_ip, exchange='CTPX', routing_key=code)
import json
def callback(a, b, c, data):
print(json.loads(data))
x.callback = callback
x.start()
sub()
| 18.809524
| 78
| 0.703797
|
3e1d757dca44efb2b66f71eef989b3f87132b34b
| 5,938
|
py
|
Python
|
toontown/ai/NewsManagerAI.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 3
|
2020-01-02T08:43:36.000Z
|
2020-07-05T08:59:02.000Z
|
toontown/ai/NewsManagerAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
toontown/ai/NewsManagerAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.toonbase import ToontownGlobals
from otp.ai.MagicWordGlobal import *
from HolidayGlobals import *
class NewsManagerAI(DistributedObjectAI):
notify = directNotify.newCategory('NewsManagerAI')
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.air = air
self.holidayList = []
self.weeklyHolidays = WEEKLY_HOLIDAYS
self.yearlyHolidays = YEARLY_HOLIDAYS
self.oncelyHolidays = ONCELY_HOLIDAYS
def announceGenerate(self):
DistributedObjectAI.announceGenerate(self)
self.accept('avatarEntered', self.__handleAvatarEntered)
def __handleAvatarEntered(self, avatar):
if self.air.suitInvasionManager.getInvading():
self.air.suitInvasionManager.notifyInvasionBulletin(avatar.getDoId())
if self.air.holidayManager.isHolidayRunning(MORE_XP_HOLIDAY):
self.sendUpdateToAvatarId(avatar.getDoId(), 'setMoreXpHolidayOngoing', [])
if self.air.holidayManager.isHolidayRunning(TROLLEY_HOLIDAY):
self.sendUpdateToAvatarId(avatar.getDoId(), 'holidayNotify', [])
if self.air.holidayManager.isHolidayRunning(CIRCUIT_RACING_EVENT):
self.sendUpdateToAvatarId(avatar.getDoId(), 'startHoliday', [CIRCUIT_RACING_EVENT])
if self.air.holidayManager.isHolidayRunning(HYDRANT_ZERO_HOLIDAY):
self.sendUpdateToAvatarId(avatar.getDoId(), 'startHoliday', [HYDRANT_ZERO_HOLIDAY])
def setPopulation(self, todo0):
pass
def setBingoWin(self, avatar, zoneId):
self.sendUpdateToAvatarId(avatar.getDoId(), 'setBingoWin', [zoneId])
def setBingoStart(self):
self.sendUpdate('setBingoStart', [])
def setBingoOngoing(self):
self.sendUpdate('setBingoOngoing', [])
def setBingoEnd(self):
self.sendUpdate('setBingoEnd', [])
def setCircuitRaceStart(self):
self.sendUpdate('setCircuitRaceStart', [])
def setCircuitRaceOngoing(self):
self.sendUpdate('setCircuitRaceOngoing', [])
def setCircuitRaceEnd(self):
self.sendUpdate('setCircuitRaceEnd', [])
def setTrolleyHolidayStart(self):
self.sendUpdate('setTrolleyHolidayStart', [])
def setTrolleyHolidayOngoing(self):
self.sendUpdate('setTrolleyHolidayOngoing', [])
def setTrolleyHolidayEnd(self):
self.sendUpdate('setTrolleyHolidayEnd', [])
def setTrolleyWeekendStart(self):
self.sendUpdate('setTrolleyWeekendStart', [])
def setTrolleyWeekendOngoing(self):
self.sendUpdate('setTrolleyWeekendOngoing', [])
def setTrolleyWeekendEnd(self):
self.sendUpdate('setTrolleyWeekendEnd', [])
def setRoamingTrialerWeekendStart(self):
self.sendUpdate('setRoamingTrialerWeekendStart', [])
def setRoamingTrialerWeekendOngoing(self):
self.sendUpdate('setRoamingTrialerWeekendOngoing', [])
def setRoamingTrialerWeekendEnd(self):
self.sendUpdate('setRoamingTrialerWeekendEnd', [])
def setSellbotNerfHolidayStart(self):
self.sendUpdate('setSellbotNerfHolidayStart', [])
def setSellbotNerfHolidayEnd(self):
self.sendUpdate('setSellbotNerfHolidayEnd', [])
def setMoreXpHolidayStart(self):
self.sendUpdate('setMoreXpHolidayStart', [])
def setMoreXpHolidayOngoing(self):
self.sendUpdate('setMoreXpHolidayOngoing', [])
def setMoreXpHolidayEnd(self):
self.sendUpdate('setMoreXpHolidayEnd', [])
def setInvasionStatus(self, msgType, cogType, numRemaining, skeleton):
self.sendUpdate('setInvasionStatus', args=[msgType, cogType, numRemaining, skeleton])
def d_setHolidayIdList(self, holidays):
self.sendUpdate('setHolidayIdList', holidays)
def holidayNotify(self):
self.sendUpdate('holidayNotify', [])
def d_setWeeklyCalendarHolidays(self, weeklyHolidays):
self.sendUpdate('setWeeklyCalendarHolidays', [weeklyHolidays])
def getWeeklyCalendarHolidays(self):
return self.weeklyHolidays
def d_setYearlyCalendarHolidays(self, yearlyHolidays):
self.sendUpdate('setYearlyCalendarHolidays', [yearlyHolidays])
def getYearlyCalendarHolidays(self):
return self.yearlyHolidays
def setOncelyCalendarHolidays(self, oncelyHolidays):
self.sendUpdate('setOncelyCalendarHolidays', [oncelyHolidays])
def getOncelyCalendarHolidays(self):
return self.oncelyHolidays
def setRelativelyCalendarHolidays(self, relatHolidays):
self.sendUpdate('setRelativelyCalendarHolidays', [relatHolidays])
def getRelativelyCalendarHolidays(self):
return []
def setMultipleStartHolidays(self, multiHolidays):
self.sendUpdate('setMultipleStartHolidays', [multiHolidays])
def getMultipleStartHolidays(self):
return []
def sendSystemMessage(self, message, style):
self.sendUpdate('sendSystemMessage', [message, style])
def sendSystemMessageToAvatar(self, avatar, message, style):
self.sendUpdateToAvatarId(avatar.getDoId(), 'sendSystemMessage', [message, style])
@magicWord(category=CATEGORY_PROGRAMMER, types=[int])
def startHoliday(holidayId):
simbase.air.newsManager.setHolidayIdList([holidayId])
return 'Successfully set holiday to %d.' % (holidayId)
@magicWord(category=CATEGORY_PROGRAMMER, types=[int])
def addHoliday(holidayId):
simbase.air.newsManager.addHolidayId(holidayId)
return 'Successfully added holiday %d to ongoing holidays!' % (holidayId)
@magicWord(category=CATEGORY_PROGRAMMER, types=[int])
def removeHoliday(holidayId):
simbase.air.newsManager.removeHolidayId(holidayId)
return 'Successfully removed holiday %d from ongoing holidays!' % (holidayId)
| 37.1125
| 95
| 0.71775
|
da4faa5eb503d098c6e27c088cc6b050f4156887
| 24,594
|
py
|
Python
|
tests/checks/mock/test_spark.py
|
takus/dd-agent
|
3029873135f0f55c1bcdf3f825691aafca5abf97
|
[
"BSD-3-Clause"
] | 2
|
2018-01-31T03:50:55.000Z
|
2018-01-31T03:51:04.000Z
|
tests/checks/mock/test_spark.py
|
takus/dd-agent
|
3029873135f0f55c1bcdf3f825691aafca5abf97
|
[
"BSD-3-Clause"
] | null | null | null |
tests/checks/mock/test_spark.py
|
takus/dd-agent
|
3029873135f0f55c1bcdf3f825691aafca5abf97
|
[
"BSD-3-Clause"
] | null | null | null |
# stdlib
from urlparse import urljoin
# 3rd party
import mock
import json
from tests.checks.common import AgentCheckTest, Fixtures
# IDs
YARN_APP_ID = 'application_1459362484344_0011'
SPARK_APP_ID = 'app_001'
CLUSTER_NAME = 'SparkCluster'
APP_NAME = 'PySparkShell'
# URLs for cluster managers
SPARK_APP_URL = 'http://localhost:4040'
SPARK_YARN_URL = 'http://localhost:8088'
SPARK_MESOS_URL = 'http://localhost:5050'
STANDALONE_URL = 'http://localhost:8080'
# URL Paths
SPARK_REST_PATH = 'api/v1/applications'
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MESOS_APPS_PATH = 'frameworks'
STANDALONE_APPS_PATH = 'json/'
STANDALONE_APP_PATH_HTML = 'app/'
# Service Check Names
SPARK_SERVICE_CHECK = 'spark.application_master.can_connect'
YARN_SERVICE_CHECK = 'spark.resource_manager.can_connect'
MESOS_SERVICE_CHECK = 'spark.mesos_master.can_connect'
STANDALONE_SERVICE_CHECK = 'spark.standalone_master.can_connect'
def join_url_dir(url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
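# For example, with the constants defined above:
#   join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH)
#   -> 'http://localhost:8088/proxy/application_1459362484344_0011/api/v1/applications'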
# YARN Service URLs
YARN_APP_URL = urljoin(SPARK_YARN_URL, YARN_APPS_PATH) + '?states=RUNNING&applicationTypes=SPARK'
YARN_SPARK_APP_URL = join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH)
YARN_SPARK_JOB_URL = join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'jobs')
YARN_SPARK_STAGE_URL = join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'stages')
YARN_SPARK_EXECUTOR_URL = join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'executors')
YARN_SPARK_RDD_URL = join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd')
# Mesos Service URLs
MESOS_APP_URL = urljoin(SPARK_MESOS_URL, MESOS_APPS_PATH)
MESOS_SPARK_APP_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH)
MESOS_SPARK_JOB_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs')
MESOS_SPARK_STAGE_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages')
MESOS_SPARK_EXECUTOR_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors')
MESOS_SPARK_RDD_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd')
# Spark Standalone Service URLs
STANDALONE_APP_URL = urljoin(STANDALONE_URL, STANDALONE_APPS_PATH)
STANDALONE_APP_HTML_URL = urljoin(STANDALONE_URL, STANDALONE_APP_PATH_HTML) + '?appId=' + SPARK_APP_ID
STANDALONE_SPARK_APP_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH)
STANDALONE_SPARK_JOB_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs')
STANDALONE_SPARK_STAGE_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages')
STANDALONE_SPARK_EXECUTOR_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors')
STANDALONE_SPARK_RDD_URL = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd')
STANDALONE_SPARK_JOB_URL_PRE20 = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'jobs')
STANDALONE_SPARK_STAGE_URL_PRE20 = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'stages')
STANDALONE_SPARK_EXECUTOR_URL_PRE20 = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'executors')
STANDALONE_SPARK_RDD_URL_PRE20 = join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'storage/rdd')
def yarn_requests_get_mock(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == YARN_APP_URL:
with open(Fixtures.file('yarn_apps'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SPARK_APP_URL:
with open(Fixtures.file('spark_apps'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SPARK_JOB_URL:
with open(Fixtures.file('job_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SPARK_STAGE_URL:
with open(Fixtures.file('stage_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SPARK_EXECUTOR_URL:
with open(Fixtures.file('executor_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SPARK_RDD_URL:
with open(Fixtures.file('rdd_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
def mesos_requests_get_mock(*args, **kwargs):
class MockMesosResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == MESOS_APP_URL:
with open(Fixtures.file('mesos_apps'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif args[0] == MESOS_SPARK_APP_URL:
with open(Fixtures.file('spark_apps'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif args[0] == MESOS_SPARK_JOB_URL:
with open(Fixtures.file('job_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif args[0] == MESOS_SPARK_STAGE_URL:
with open(Fixtures.file('stage_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif args[0] == MESOS_SPARK_EXECUTOR_URL:
with open(Fixtures.file('executor_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif args[0] == MESOS_SPARK_RDD_URL:
with open(Fixtures.file('rdd_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
def standalone_requests_get_mock(*args, **kwargs):
class MockStandaloneResponse:
text = ''
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.text = json_data
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == STANDALONE_APP_URL:
with open(Fixtures.file('spark_standalone_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_APP_HTML_URL:
with open(Fixtures.file('spark_standalone_app'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_APP_URL:
with open(Fixtures.file('spark_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_JOB_URL:
with open(Fixtures.file('job_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_STAGE_URL:
with open(Fixtures.file('stage_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_EXECUTOR_URL:
with open(Fixtures.file('executor_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_RDD_URL:
with open(Fixtures.file('rdd_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
def standalone_requests_pre20_get_mock(*args, **kwargs):
class MockStandaloneResponse:
text = ''
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.text = json_data
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == STANDALONE_APP_URL:
with open(Fixtures.file('spark_standalone_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_APP_HTML_URL:
with open(Fixtures.file('spark_standalone_app'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_APP_URL:
with open(Fixtures.file('spark_apps_pre20'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_JOB_URL:
return MockStandaloneResponse("{}", 404)
elif args[0] == STANDALONE_SPARK_STAGE_URL:
return MockStandaloneResponse("{}", 404)
elif args[0] == STANDALONE_SPARK_EXECUTOR_URL:
return MockStandaloneResponse("{}", 404)
elif args[0] == STANDALONE_SPARK_RDD_URL:
return MockStandaloneResponse("{}", 404)
elif args[0] == STANDALONE_SPARK_JOB_URL_PRE20:
with open(Fixtures.file('job_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_STAGE_URL_PRE20:
with open(Fixtures.file('stage_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_EXECUTOR_URL_PRE20:
with open(Fixtures.file('executor_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif args[0] == STANDALONE_SPARK_RDD_URL_PRE20:
with open(Fixtures.file('rdd_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
class SparkCheck(AgentCheckTest):
CHECK_NAME = 'spark'
YARN_CONFIG = {
'spark_url': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_yarn_mode'
}
MESOS_CONFIG = {
'spark_url': 'http://localhost:5050',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_mesos_mode'
}
STANDALONE_CONFIG = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode'
}
STANDALONE_CONFIG_PRE_20 = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'spark_pre_20_mode': 'true'
}
SPARK_JOB_RUNNING_METRIC_VALUES = {
'spark.job.count': 2,
'spark.job.num_tasks': 20,
'spark.job.num_active_tasks': 30,
'spark.job.num_completed_tasks': 40,
'spark.job.num_skipped_tasks': 50,
'spark.job.num_failed_tasks': 60,
'spark.job.num_active_stages': 70,
'spark.job.num_completed_stages': 80,
'spark.job.num_skipped_stages': 90,
'spark.job.num_failed_stages': 100
}
SPARK_JOB_RUNNING_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:running',
]
SPARK_JOB_SUCCEEDED_METRIC_VALUES = {
'spark.job.count': 3,
'spark.job.num_tasks': 1000,
'spark.job.num_active_tasks': 2000,
'spark.job.num_completed_tasks': 3000,
'spark.job.num_skipped_tasks': 4000,
'spark.job.num_failed_tasks': 5000,
'spark.job.num_active_stages': 6000,
'spark.job.num_completed_stages': 7000,
'spark.job.num_skipped_stages': 8000,
'spark.job.num_failed_stages': 9000
}
SPARK_JOB_SUCCEEDED_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:succeeded',
]
SPARK_STAGE_RUNNING_METRIC_VALUES = {
'spark.stage.count': 3,
'spark.stage.num_active_tasks': 3*3,
'spark.stage.num_complete_tasks': 4*3,
'spark.stage.num_failed_tasks': 5*3,
'spark.stage.executor_run_time': 6*3,
'spark.stage.input_bytes': 7*3,
'spark.stage.input_records': 8*3,
'spark.stage.output_bytes': 9*3,
'spark.stage.output_records': 10*3,
'spark.stage.shuffle_read_bytes': 11*3,
'spark.stage.shuffle_read_records': 12*3,
'spark.stage.shuffle_write_bytes': 13*3,
'spark.stage.shuffle_write_records': 14*3,
'spark.stage.memory_bytes_spilled': 15*3,
'spark.stage.disk_bytes_spilled': 16*3,
}
SPARK_STAGE_RUNNING_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:running',
]
SPARK_STAGE_COMPLETE_METRIC_VALUES = {
'spark.stage.count': 2,
'spark.stage.num_active_tasks': 100*2,
'spark.stage.num_complete_tasks': 101*2,
'spark.stage.num_failed_tasks': 102*2,
'spark.stage.executor_run_time': 103*2,
'spark.stage.input_bytes': 104*2,
'spark.stage.input_records': 105*2,
'spark.stage.output_bytes': 106*2,
'spark.stage.output_records': 107*2,
'spark.stage.shuffle_read_bytes': 108*2,
'spark.stage.shuffle_read_records': 109*2,
'spark.stage.shuffle_write_bytes': 110*2,
'spark.stage.shuffle_write_records': 111*2,
'spark.stage.memory_bytes_spilled': 112*2,
'spark.stage.disk_bytes_spilled': 113*2,
}
SPARK_STAGE_COMPLETE_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:complete',
]
SPARK_DRIVER_METRIC_VALUES = {
'spark.driver.rdd_blocks': 99,
'spark.driver.memory_used': 98,
'spark.driver.disk_used': 97,
'spark.driver.active_tasks': 96,
'spark.driver.failed_tasks': 95,
'spark.driver.completed_tasks': 94,
'spark.driver.total_tasks': 93,
'spark.driver.total_duration': 92,
'spark.driver.total_input_bytes': 91,
'spark.driver.total_shuffle_read': 90,
'spark.driver.total_shuffle_write': 89,
'spark.driver.max_memory': 278019440,
}
SPARK_EXECUTOR_METRIC_VALUES = {
'spark.executor.count': 2,
'spark.executor.rdd_blocks': 1,
'spark.executor.memory_used': 2,
'spark.executor.disk_used': 3,
'spark.executor.active_tasks': 4,
'spark.executor.failed_tasks': 5,
'spark.executor.completed_tasks': 6,
'spark.executor.total_tasks': 7,
'spark.executor.total_duration': 8,
'spark.executor.total_input_bytes': 9,
'spark.executor.total_shuffle_read': 10,
'spark.executor.total_shuffle_write': 11,
'spark.executor.max_memory': 555755765,
}
SPARK_RDD_METRIC_VALUES = {
'spark.rdd.count': 1,
'spark.rdd.num_partitions': 2,
'spark.rdd.num_cached_partitions': 2,
'spark.rdd.memory_used': 284,
'spark.rdd.disk_used': 0,
}
SPARK_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME
]
@mock.patch('requests.get', side_effect=yarn_requests_get_mock)
def test_yarn(self, mock_requests):
config = {
'instances': [self.YARN_CONFIG]
}
self.run_check(config)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in self.SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in self.SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in self.SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in self.SPARK_DRIVER_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in self.SPARK_EXECUTOR_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in self.SPARK_RDD_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the service tests
self.assertServiceCheckOK(YARN_SERVICE_CHECK,
tags=['url:http://localhost:8088'])
self.assertServiceCheckOK(SPARK_SERVICE_CHECK,
tags=['url:http://localhost:8088'])
@mock.patch('requests.get', side_effect=mesos_requests_get_mock)
def test_mesos(self, mock_requests):
config = {
'instances': [self.MESOS_CONFIG]
}
self.run_check(config)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in self.SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in self.SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in self.SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in self.SPARK_DRIVER_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in self.SPARK_EXECUTOR_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in self.SPARK_RDD_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the service tests
self.assertServiceCheckOK(MESOS_SERVICE_CHECK,
tags=['url:http://localhost:5050'])
self.assertServiceCheckOK(SPARK_SERVICE_CHECK,
tags=['url:http://localhost:4040'])
@mock.patch('requests.get', side_effect=standalone_requests_get_mock)
def test_standalone(self, mock_requests):
config = {
'instances': [self.STANDALONE_CONFIG]
}
self.run_check(config)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in self.SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in self.SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in self.SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in self.SPARK_DRIVER_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in self.SPARK_EXECUTOR_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in self.SPARK_RDD_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the service tests
self.assertServiceCheckOK(STANDALONE_SERVICE_CHECK,
tags=['url:http://localhost:8080'])
self.assertServiceCheckOK(SPARK_SERVICE_CHECK,
tags=['url:http://localhost:4040'])
@mock.patch('requests.get', side_effect=standalone_requests_pre20_get_mock)
def test_standalone_pre20(self, mock_requests):
config = {
'instances': [self.STANDALONE_CONFIG_PRE_20],
}
self.run_check(config)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the running job metrics
for metric, value in self.SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in self.SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in self.SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in self.SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in self.SPARK_DRIVER_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in self.SPARK_EXECUTOR_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in self.SPARK_RDD_METRIC_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.SPARK_METRIC_TAGS)
# Check the service tests
self.assertServiceCheckOK(STANDALONE_SERVICE_CHECK,
tags=['url:http://localhost:8080'])
self.assertServiceCheckOK(SPARK_SERVICE_CHECK,
tags=['url:http://localhost:4040'])
| 36.220913
| 120
| 0.643938
|
22417c7c69b368322a756198a093dd15dd5f091f
| 17,325
|
py
|
Python
|
src/mainWithTensorboard.py
|
jacobbettencourt/comp766_project
|
d044d042adfe8c54e88d7f759fe16854cf1bb1a2
|
[
"MIT"
] | null | null | null |
src/mainWithTensorboard.py
|
jacobbettencourt/comp766_project
|
d044d042adfe8c54e88d7f759fe16854cf1bb1a2
|
[
"MIT"
] | null | null | null |
src/mainWithTensorboard.py
|
jacobbettencourt/comp766_project
|
d044d042adfe8c54e88d7f759fe16854cf1bb1a2
|
[
"MIT"
] | null | null | null |
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='vgg16',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
                        ' (default: vgg16)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--runsavedir', default='', type=str, metavar='RUNDIR',
help='Path to save run information')
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
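    # Note (added comment): replacing `model.fc` below assumes a ResNet-style head whose final
    # Linear layer takes 2048 input features; VGG/AlexNet models expose `model.classifier` instead.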
model.fc = nn.Linear(2048,67,bias=True)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.Adam(model.parameters())#, args.lr,
#momentum=args.momentum,
#weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
#transforms.RandomResizedCrop(224),
transforms.Resize((224,224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
#transforms.Resize(256),
#transforms.CenterCrop(224),
transforms.Resize((224,224)),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
writer = SummaryWriter('runs/' + args.runsavedir)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
top1, top5, tLoss = train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1, acc5 = validate(val_loader, model, criterion, args)
writer.add_scalar('Top 1 Train', top1, epoch)
writer.add_scalar('Top 5 Train', top5, epoch)
writer.add_scalar('Training Loss (Average)', tLoss, epoch)
writer.add_scalar('Top 1 Val', acc1, epoch)
writer.add_scalar('Top 5 Val', acc5, epoch)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
writer.close()
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
return top1.avg, top5.avg, losses.avg
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
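# Example of the resulting schedule with the default --lr 0.1 (illustrative comment, not from the original):
#   epochs 0-29 -> 0.1, epochs 30-59 -> 0.01, epochs 60-89 -> 0.001
# Since the training loop calls this on the Adam optimizer created above, the stepped value
# overrides Adam's default learning rate each epoch.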
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
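# Illustrative example (added comment): accuracy(output, target, topk=(1, 5)) returns [top1, top5],
# where each entry is a 1-element tensor holding the percentage of samples whose true label appears
# among the model's k highest-scoring classes for that batch.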
if __name__ == '__main__':
main()
| 38.845291
| 91
| 0.60785
|
6a7f1568f68bedca990378fb64cf5f81bd14e1db
| 1,430
|
py
|
Python
|
ambulance/signals.py
|
shubhamkulkarni01/EMSTrack-Django
|
32ff9ed94a38730c0e9f6385c75060e2d30a930e
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2020-07-16T01:44:54.000Z
|
2020-10-25T02:08:47.000Z
|
ambulance/signals.py
|
shubhamkulkarni01/EMSTrack-Django
|
32ff9ed94a38730c0e9f6385c75060e2d30a930e
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2020-04-20T22:13:56.000Z
|
2022-02-04T17:50:44.000Z
|
ambulance/signals.py
|
shubhamkulkarni01/EMSTrack-Django
|
32ff9ed94a38730c0e9f6385c75060e2d30a930e
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2020-07-20T23:39:44.000Z
|
2022-02-24T00:29:10.000Z
|
import logging
from django.db.models.signals import post_save, m2m_changed
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from emstrack.sms import client
from .models import Call
logger = logging.getLogger(__name__)
# Add signal to automatically clear cache when group permissions change
@receiver(m2m_changed, sender=Call.sms_notifications.through)
def user_groups_changed_handler(sender, instance, action,
reverse, model, pk_set, **kwargs):
if action == 'post_add' or action == 'post_remove':
# get call and users
if reverse:
# call was added to user
            call = Call.objects.get(id=list(pk_set)[0])  # pk_set is a set, so index it through list()
            users = [instance]  # keep the User object so notify_user() receives a user, not an id
else:
# user was added to call
call = instance
users = []
for id in pk_set:
users.append(User.objects.get(id=id))
# create message
if action == 'post_add':
message = _("You will be notified of updates to")
else: # if action == 'post_remove':
message = _("You will no longer be notified of updates to")
message = "{}:\n* {} {}".format(message, _("Call"), call.to_string())
# notify users
for user in users:
client.notify_user(user, message)
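# Example trigger (added comment): `call.sms_notifications.add(user)` fires this handler with
# action == 'post_add', reverse=False, instance=call and the user ids in pk_set; adding from the
# User side (through whichever reverse accessor the field defines) arrives with reverse=True.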
| 31.777778
| 77
| 0.627273
|
d9fc0dab844f1705d7ebd633fc775d27b1823175
| 4,996
|
py
|
Python
|
python/example_code/pinpoint-email/pinpoint_send_email_message_email_api.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 3
|
2021-01-19T20:23:17.000Z
|
2021-01-19T21:38:59.000Z
|
python/example_code/pinpoint-email/pinpoint_send_email_message_email_api.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/pinpoint-email/pinpoint_send_email_message_email_api.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 2
|
2019-12-27T13:58:00.000Z
|
2020-05-21T18:35:40.000Z
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# snippet-sourcedescription:[pinpoint_send_email_message_email_api demonstrates how to send a transactional email message by using the SendEmail operation in the Amazon Pinpoint Email API.]
# snippet-service:[mobiletargeting]
# snippet-keyword:[Python]
# snippet-keyword:[Amazon Pinpoint Email API]
# snippet-keyword:[Code Sample]
# snippet-keyword:[SendEmail]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-20]
# snippet-sourceauthor:[AWS]
# snippet-start:[pinpoint.python.pinpoint_send_email_message_email_api.complete]
import boto3
from botocore.exceptions import ClientError
# The AWS Region that you want to use to send the email. For a list of
# AWS Regions where the Amazon Pinpoint Email API is available, see
# https://docs.aws.amazon.com/pinpoint-email/latest/APIReference
AWS_REGION = "us-west-2"
# The "From" address. This address has to be verified in
# Amazon Pinpoint in the region you're using to send email.
SENDER = "Mary Major <sender@example.com>"
# The addresses on the "To" line. If your Amazon Pinpoint account is in
# the sandbox, these addresses also have to be verified.
TOADDRESSES = ["recipient@example.com"]
# CC and BCC addresses. If your account is in the sandbox, these
# addresses have to be verified.
CCADDRESSES = ["cc_recipient1@example.com", "cc_recipient2@example.com"]
BCCADDRESSES = ["bcc_recipient@example.com"]
# The configuration set that you want to use to send the email.
CONFIGURATION_SET = "ConfigSet"
# The subject line of the email.
SUBJECT = "Amazon Pinpoint Test (SDK for Python)"
# The body of the email for recipients whose email clients don't support HTML
# content.
BODY_TEXT = """Amazon Pinpoint Test (SDK for Python)
-------------------------------------
This email was sent with Amazon Pinpoint using the AWS SDK for Python.
For more information, see https://aws.amazon.com/sdk-for-python/
"""
# The body of the email for recipients whose email clients can display HTML
# content.
BODY_HTML = """<html>
<head></head>
<body>
<h1>Amazon Pinpoint Test (SDK for Python)</h1>
<p>This email was sent with
    <a href='https://aws.amazon.com/pinpoint/'>Amazon Pinpoint</a> using the
    <a href='https://aws.amazon.com/sdk-for-python/'>
AWS SDK for Python</a>.</p>
</body>
</html>
"""
# The message tags that you want to apply to the email.
TAG0 = {'Name': 'key0', 'Value': 'value0'}
TAG1 = {'Name': 'key1', 'Value': 'value1'}
# The character encoding that you want to use for the subject line and message
# body of the email.
CHARSET = "UTF-8"
# Create a new Pinpoint resource and specify a region.
client = boto3.client('pinpoint-email', region_name=AWS_REGION)
# Send the email.
try:
# Create a request to send the email. The request contains all of the
# message attributes and content that were defined earlier.
response = client.send_email(
FromEmailAddress=SENDER,
# An object that contains all of the email addresses that you want to
# send the message to. You can send a message to up to 50 recipients in
# a single call to the API.
Destination={
'ToAddresses': TOADDRESSES,
'CcAddresses': CCADDRESSES,
'BccAddresses': BCCADDRESSES
},
# The body of the email message.
Content={
# Create a new Simple message. If you need to include attachments,
# you should send a RawMessage instead.
'Simple': {
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
},
'Body': {
'Html': {
'Charset': CHARSET,
'Data': BODY_HTML
},
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
}
}
}
},
# The configuration set that you want to use when you send this message.
ConfigurationSetName=CONFIGURATION_SET,
EmailTags=[
TAG0,
TAG1
]
)
# Display an error if something goes wrong.
except ClientError as e:
print("The message wasn't sent. Error message: \"" + e.response['Error']['Message'] + "\"")
else:
print("Email sent!")
print("Message ID: " + response['MessageId'])
# snippet-end:[pinpoint.python.pinpoint_send_email_message_email_api.complete]
| 37.007407
| 189
| 0.658527
|
7a60d990f253eacb755ab5f85b43467cb4bd1282
| 5,339
|
py
|
Python
|
pygmt/src/grdfilter.py
|
ankitdobhal/pygmt
|
88fafa5af57d2b182e0dbac7017912f2d8cabfa0
|
[
"BSD-3-Clause"
] | null | null | null |
pygmt/src/grdfilter.py
|
ankitdobhal/pygmt
|
88fafa5af57d2b182e0dbac7017912f2d8cabfa0
|
[
"BSD-3-Clause"
] | null | null | null |
pygmt/src/grdfilter.py
|
ankitdobhal/pygmt
|
88fafa5af57d2b182e0dbac7017912f2d8cabfa0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
grdfilter - Filter a grid in the space (or time) domain.
"""
import xarray as xr
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
D="distance",
F="filter",
G="outgrid",
I="spacing",
N="nans",
R="region",
T="toggle",
V="verbose",
f="coltypes",
)
@kwargs_to_strings(R="sequence")
def grdfilter(grid, **kwargs):
r"""
Filter a grid in the space (or time) domain.
Filter a grid file in the time domain using one of the selected convolution
or non-convolution isotropic or rectangular filters and compute distances
using Cartesian or Spherical geometries. The output grid file can
optionally be generated as a sub-region of the input (via ``region``)
and/or with new increment (via ``spacing``) or registration
(via ``toggle``). In this way, one may have "extra space" in the input
data so that the edges will not be used and the output can be within one
half-width of the input edges. If the filter is low-pass, then the output
may be less frequently sampled than the input.
Full option list at :gmt-docs:`grdfilter.html`
{aliases}
Parameters
----------
grid : str or xarray.DataArray
The file name of the input grid or the grid loaded as a DataArray.
outgrid : str or None
The name of the output netCDF file with extension .nc to store the grid
in.
filter : str
**b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\
[/*width2*\][*modifiers*].
        Name of filter type you wish to apply, followed by the width:
b: Box Car
c: Cosine Arch
g: Gaussian
o: Operator
m: Median
p: Maximum Likelihood probability
h: histogram
distance : str
Distance *flag* tells how grid (x,y) relates to filter width as
follows:
p: grid (px,py) with *width* an odd number of pixels; Cartesian
distances.
0: grid (x,y) same units as *width*, Cartesian distances.
1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances.
2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y),
Cartesian distances.
The above options are fastest because they allow weight matrix to be
computed only once. The next three options are slower because they
recompute weights for each latitude.
3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y),
Cartesian distance calculation.
4: grid (x,y) in degrees, *width* in km, Spherical distance
calculation.
5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km,
Spherical distance calculation.
spacing : str
*xinc*\[\ *unit*\][**+e**\|\ **n**]
[/*yinc*\ [*unit*][**+e**\|\ **n**]].
*xinc* [and optionally *yinc*] is the grid spacing.
nans : str or float
**i**\|\ **p**\|\ **r**.
        Determine how NaN-values in the input grid affect the filtered output.
{R}
toggle : bool
Toggle the node registration for the output grid so as to become the
opposite of the input grid. [Default gives the same registration as the
input grid].
{V}
{f}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the ``outgrid`` parameter is set:
- :class:`xarray.DataArray` if ``outgrid`` is not set
- None if ``outgrid`` is set (grid output will be stored in file set by
``outgrid``)
Examples
--------
>>> import os
>>> import pygmt
>>> # Apply a filter of 600km (full width) to the @earth_relief_30m file
>>> # and return a filtered field (saved as netcdf)
>>> pygmt.grdfilter(
... grid="@earth_relief_30m",
... filter="m600",
... distance="4",
... region=[150, 250, 10, 40],
... spacing=0.5,
... outgrid="filtered_pacific.nc",
... )
>>> os.remove("filtered_pacific.nc") # cleanup file
>>> # Apply a gaussian smoothing filter of 600 km in the input data array,
>>> # and returns a filtered data array with the smoothed field.
>>> grid = pygmt.datasets.load_earth_relief()
>>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4")
"""
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
if "G" not in kwargs.keys(): # if outgrid is unset, output to tempfile
kwargs.update({"G": tmpfile.name})
outgrid = kwargs["G"]
arg_str = " ".join([infile, build_arg_string(kwargs)])
lib.call_module("grdfilter", arg_str)
if outgrid == tmpfile.name: # if user did not set outgrid, return DataArray
with xr.open_dataarray(outgrid) as dataarray:
result = dataarray.load()
_ = result.gmt # load GMTDataArray accessor information
else:
result = None # if user sets an outgrid, return None
return result
| 32.754601
| 87
| 0.601985
|
702b40da6a798544a1a847793ad4e5d6d99ace52
| 4,861
|
py
|
Python
|
userbot/utils/pastebin.py
|
tofikdn/Man-Userbot
|
1ba63b30a42332b64fb3e2d1ac41c5db744846d2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-08-16T13:10:59.000Z
|
2021-08-16T13:10:59.000Z
|
userbot/utils/pastebin.py
|
tofikdn/Man-Userbot
|
1ba63b30a42332b64fb3e2d1ac41c5db744846d2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 23
|
2021-08-20T16:50:46.000Z
|
2022-01-14T19:05:00.000Z
|
userbot/utils/pastebin.py
|
tofikdn/Man-Userbot
|
1ba63b30a42332b64fb3e2d1ac41c5db744846d2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8
|
2021-08-16T13:11:14.000Z
|
2022-03-15T13:27:39.000Z
|
import re
import aiohttp
from aiohttp.client_exceptions import ClientConnectorError
class PasteBin:
DOGBIN_URL = "https://del.dog/"
HASTEBIN_URL = "https://www.toptal.com/developers/hastebin/"
NEKOBIN_URL = "https://nekobin.com/"
KATBIN_URL = "https://katb.in/"
_dkey = _hkey = _nkey = _kkey = retry = None
service_match = {"-d": "dogbin", "-n": "nekobin", "-h": "hastebin", "-k": "katbin"}
def __init__(self, data: str = None):
self.http = aiohttp.ClientSession()
self.data = data
self.retries = 4
def __bool__(self):
return bool(self._dkey or self._nkey or self._hkey or self._kkey)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
async def close(self):
await self.http.close()
async def __call__(self, service="dogbin"):
if service == "dogbin":
await self._post_dogbin()
elif service == "nekobin":
await self._post_nekobin()
elif service == "hastebin":
await self._post_hastebin()
elif service == "katbin":
await self._post_katbin()
else:
raise KeyError(f"Unknown service input: {service}")
async def _get_katbin_token(self):
token = None
async with self.http.get(self.KATBIN_URL) as req:
if req.status != 200:
return token
content = await req.text()
for i in re.finditer(r'name="_csrf_token".+value="(.+)"', content):
token = i.group(1)
break
return token
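    # The regex above expects the katb.in paste form to embed markup along the lines of
    # <input name="_csrf_token" type="hidden" value="...">; the captured value is reused when
    # posting (an observation about the page markup, not a documented API contract).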
async def _post_dogbin(self):
if self._dkey:
return
try:
async with self.http.post(
self.DOGBIN_URL + "documents", data=self.data.encode("utf-8")
) as req:
if req.status == 200:
res = await req.json()
self._dkey = res["key"]
else:
self.retry = "nekobin"
except ClientConnectorError:
self.retry = "nekobin"
async def _post_nekobin(self):
if self._nkey:
return
try:
async with self.http.post(
self.NEKOBIN_URL + "api/documents", json={"content": self.data}
) as req:
if req.status == 201:
res = await req.json()
self._nkey = res["result"]["key"]
else:
self.retry = "hastebin"
except ClientConnectorError:
self.retry = "hastebin"
async def _post_hastebin(self):
if self._hkey:
return
try:
async with self.http.post(
self.HASTEBIN_URL + "documents", data=self.data.encode("utf-8")
) as req:
if req.status == 200:
res = await req.json()
self._hkey = res["key"]
else:
self.retry = "katbin"
except ClientConnectorError:
self.retry = "katbin"
async def _post_katbin(self):
if self._kkey:
return
token = await self._get_katbin_token()
if not token:
return
try:
async with self.http.post(
self.KATBIN_URL,
data={"_csrf_token": token, "paste[content]": self.data},
) as req:
if req.status != 200:
self.retry = "dogbin"
else:
self._kkey = str(req.url).split(self.KATBIN_URL)[-1]
except ClientConnectorError:
self.retry = "dogbin"
async def post(self, serv: str = "dogbin"):
"""Post the initialized data to the pastebin service."""
if self.retries == 0:
return
await self.__call__(serv)
if self.retry:
self.retries -= 1
await self.post(self.retry)
self.retry = None
@property
def link(self) -> str:
"""Return the view link"""
if self._dkey:
return self.DOGBIN_URL + self._dkey
if self._nkey:
return self.NEKOBIN_URL + self._nkey
if self._hkey:
return self.HASTEBIN_URL + self._hkey
if self._kkey:
return self.KATBIN_URL + self._kkey
return False
@property
def raw_link(self) -> str:
"""Return the view raw link"""
if self._dkey:
return self.DOGBIN_URL + "raw/" + self._dkey
if self._nkey:
return self.NEKOBIN_URL + "raw/" + self._nkey
if self._hkey:
return self.HASTEBIN_URL + "raw/" + self._hkey
if self._kkey:
return self.KATBIN_URL + "raw/" + self._kkey
return False
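# Minimal usage sketch (added comment, assuming an asyncio event loop is already running):
#
#     async with PasteBin("hello world") as paste:
#         await paste.post()          # tries dogbin first, then falls back via self.retry
#         print(paste.link, paste.raw_link)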
| 31.36129
| 87
| 0.521498
|
4652cc577ba59730a8a97205a6758093b3afe702
| 4,006
|
py
|
Python
|
jwtcat.py
|
brightio/jwtcat
|
86bf874104a8dd2c3df6be4e3776b1bb071dd23c
|
[
"Apache-2.0"
] | null | null | null |
jwtcat.py
|
brightio/jwtcat
|
86bf874104a8dd2c3df6be4e3776b1bb071dd23c
|
[
"Apache-2.0"
] | null | null | null |
jwtcat.py
|
brightio/jwtcat
|
86bf874104a8dd2c3df6be4e3776b1bb071dd23c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2017 Alexandre Teyar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datetime import datetime, timedelta
import colorlog
import jwt
import logging
import os
import signal
import sys
import time
formatter = colorlog.ColoredFormatter(
"%(log_color)s[%(levelname)s] %(message)s%(reset)s",
reset = True,
log_colors = {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red, bg_white',
}
)
handler = colorlog.StreamHandler()
handler.setFormatter(formatter)
logger = colorlog.getLogger("jwtcatLog")
logger.addHandler(handler)
def parse_args():
""" Parse and validate user's command line
"""
parser = argparse.ArgumentParser(
description = "JSON Web Token brute-forcer"
)
parser.add_argument(
"-t", "--token",
dest = "token",
help = "JSON Web Token",
required = True,
type = str
)
parser.add_argument(
"-v", "--verbose",
dest = "loglevel",
help = "enable verbose",
required = False,
action = "store_const",
const = logging.DEBUG,
default = logging.INFO
)
# Set the UTF-8 encoding and ignore error mode to avoid issues with the wordlist
parser.add_argument(
"-w", "--wordlist",
dest = "wordlist",
help = "wordlist containing the passwords",
required = True,
type = argparse.FileType(
'r',
encoding = 'UTF-8',
errors = 'ignore'
)
)
return parser.parse_args()
def run(token, word):
""" Check if [word] can decrypt [token]
"""
try:
        payload = jwt.decode(token, word, algorithms=["HS256"])  # decode() expects the plural "algorithms" list
return True
except jwt.exceptions.InvalidTokenError:
logger.debug("InvalidTokenError: {}".format(word))
return False
except jwt.exceptions.DecodeError:
logger.debug("DecodingError: {}".format(word))
return False
except Exception as ex:
logger.exception("Exception: {}".format(ex))
sys.exit(1)
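# Quick self-check sketch (added comment): a matching secret should make run() return True, e.g.
#   token = jwt.encode({"user": "test"}, "s3cr3t", algorithm="HS256")
#   run(token, "s3cr3t")  ->  True
# (jwt.encode may return bytes on older PyJWT versions; decode accepts either form.)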
def main():
try:
args = parse_args()
logger.setLevel(args.loglevel)
token = args.token
wordlist = args.wordlist
logger.info("JWT: {}".format(token))
logger.info("Wordlist: {}".format(wordlist.name))
logger.info("Starting brute-force attacks")
logger.warning("Pour yourself some coffee, this might take a while..." )
start_time = time.time()
for entry in wordlist:
word = entry.rstrip()
result = run(token, word)
if result:
logger.info("Secret key: {}".format(word))
# Save the holy secret into a file in case sys.stdout is not responding
with open("jwtpot.potfile", "a+") as file:
file.write("{0}:{1}\n".format(token, word))
logger.info("Secret key saved to location: {}".format(file.name))
break
end_time = time.time()
elapsed_time = end_time - start_time
logger.info("Finished in {} sec".format(elapsed_time))
except KeyboardInterrupt:
logger.error("CTRL+C pressed, exiting...")
wordlist.close()
elapsed_time = time.time() - start_time
logger.info("Interrupted after {} sec".format(elapsed_time))
if __name__ == "__main__":
main()
| 28.211268
| 87
| 0.603345
|
8cf918efc3ceb4a7da1247547d83503797be3cb0
| 1,205
|
gyp
|
Python
|
library/boost-pool/1.57.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 3
|
2015-01-06T15:22:16.000Z
|
2015-11-27T18:13:04.000Z
|
library/boost-pool/1.57.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 7
|
2015-02-10T15:13:38.000Z
|
2021-05-30T07:51:13.000Z
|
library/boost-pool/1.57.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 3
|
2015-01-29T17:19:53.000Z
|
2016-01-06T12:50:06.000Z
|
{
"targets": [
{
"target_name": "boost-pool",
"type": "none",
"include_dirs": [
"1.57.0/pool-boost-1.57.0/include"
],
"all_dependent_settings": {
"include_dirs": [
"1.57.0/pool-boost-1.57.0/include"
]
},
"dependencies": [
"../boost-config/boost-config.gyp:*",
"../boost-assert/boost-assert.gyp:*",
"../boost-throw_exception/boost-throw_exception.gyp:*",
"../boost-math/boost-math.gyp:*",
"../boost-mpl/boost-mpl.gyp:*",
"../boost-thread/boost-thread.gyp:*"
]
},
{
"target_name": "boost-pool_time_pool_alloc",
"type": "executable",
"test": {},
"sources": ["1.57.0/pool-boost-1.57.0/example/time_pool_alloc.cpp"],
"dependencies": [ "boost-pool" ],
# this disables building the example on iOS
"conditions": [
["OS=='iOS'",{"type": "none"}],
["OS=='mac'",{"type": "none"}]
]
}
]
}
| 32.567568
| 80
| 0.409959
|
3557673aeb7a14de6e5e293a2382ef3197b22806
| 13,627
|
py
|
Python
|
GhostScan/Calibrations/RadiometricCalibration.py
|
yyf20001230/GhostScan
|
5694df4532132be5e916bd72a46dc907eb108bf9
|
[
"MIT"
] | 4
|
2021-09-27T14:16:08.000Z
|
2022-03-17T07:03:18.000Z
|
GhostScan/Calibrations/RadiometricCalibration.py
|
clkimsdu/GhostScan
|
5694df4532132be5e916bd72a46dc907eb108bf9
|
[
"MIT"
] | null | null | null |
GhostScan/Calibrations/RadiometricCalibration.py
|
clkimsdu/GhostScan
|
5694df4532132be5e916bd72a46dc907eb108bf9
|
[
"MIT"
] | 2
|
2022-02-04T17:32:04.000Z
|
2022-03-31T09:53:20.000Z
|
import numpy as np
import os
import cv2
import glob
import matplotlib.pyplot as plt
import skimage
from skimage.util import img_as_ubyte
from skimage.color import rgb2gray
import PIL
class RadiometricCalibration:
def __init__(self, resolution, gamma=0.57, sampling_points=1000, path='CalibrationImages/Radiometric'):
# # Set camera, destination path
self.path = path
# Get image resolution:
self.width, self.height = resolution
# print((self.width, self.height))
# (1920, 1200)
# Amount of sample points per image - to speed up calculation
self.sampling_points = sampling_points
# Initialize g function with None for later same for log exposure values
# Gamma correction:
self.gamma = gamma
self.g = None
self.w = None
self.le = None
# Raw captured data
self.raw_data = None
# Down-sampled data
self.raw_samples = None
# Exposure times
self.exposures = None
def load_calibration_data(self):
# Check if calibration file already exists
if os.path.exists('CalibrationNumpyData/radiometric.npz'):
# Load g function, log exposure, weighting function, exposures, raw samples
data = np.load('CalibrationNumpyData/radiometric.npz')
self.g = data['g_function']
self.le = data['log_exposures']
self.w = data['w_function']
self.exposures = data['exposures']
self.raw_samples = data['samples']
else:
print("Capture and calibrate camera first")
return self.g
def compute_gamma_colorchart(self, intensities):
        # intensities is a 1D array of the captured intensity values of the gray tiles on the checker board
        # Returns a gamma value that fits the captured intensities to a linear response
no_intensities = intensities.shape[0]
intensities = (intensities-np.min(intensities))/(np.max(intensities)-np.min(intensities))
ground_truth = np.linspace(0, 1, no_intensities)
# Disregard zero values because their logarithm is not defined
self.gamma = np.sum(np.log(intensities[1:])*np.log(ground_truth[1:]))/np.sum(np.log(intensities[1:])**2)
return self.gamma
def load_raw_data(self):
# Loading raw data files
k = 0
# Empty lists for images and exposure times
Exposure = []
self.raw_data = []
files = []
for file in os.listdir(self.path):
# Only use .png files
if file.endswith(".PNG") or file.endswith(".png") or file.endswith(".Png"):
files.append(file)
# Sort files depending on their exposure time from lowest to highest
files.sort(key=lambda x: int(x[:-4]))
# We used exposure time as filenames
print("loading data..")
for filename in files:
image = PIL.Image.open(self.path + '/' + filename)
image = np.asarray(image,dtype=np.uint16)
image = rgb2gray(image)
image = image * 65535
print("Image" + str(k) + " intensity max:" + str(image.max()))
# for .raw file, we need to know the picture shape in advance
self.width = image.shape[0]
self.height = image.shape[1]
filename = os.path.splitext(filename)[0] + '\n'
Exposure.append(int(filename))
self.raw_data.append(image)
# k is used to count the number of pictures
k = k + 1
ExposureNumber = k
# Shrink the number of samples #
Z = np.zeros([self.sampling_points, ExposureNumber], int)
# Choose random sample points within image
row = np.random.randint(self.width, size=self.sampling_points)
col = np.random.randint(self.height, size=self.sampling_points)
for i in range(ExposureNumber):
Z[:, i] = self.raw_data[i][row, col]
# Initialize to raw_samples
self.raw_samples = Z
exps = np.sort(np.array(Exposure))
# Check if loaded exposure values match the predefined values
self.exposures = exps
print("Radiometric raw data loaded...")
def plotCurve(self, title):
"""
This function will plot the curve of the solved G function and the measured pixels. You don't need to return anything in this function.
Input
solveG: A (256,1) array. Solved G function generated in the previous section.
        LE: Log irradiance of the image.
        logexpTime: (k,) array, k is the number of input images. Log exposure time.
        zValues: m*n array. m is the number of sampling points, and n is the number of input images. Z value generated in the previous section.
        Please note that this function only takes z values from ONE channel.
title: A string. Title of the plot.
"""
logexpTime = np.log(self.exposures*(10**-6))
fig = plt.figure()
plt.title(title)
plt.xlabel('Log exposure')
plt.ylabel('Pixel intensity value')
LEx = np.expand_dims(self.le, axis=1)
LEx = np.repeat(LEx, logexpTime.shape[0], axis=1)
logx = np.expand_dims(logexpTime, axis=1)
logx = np.swapaxes(logx, 0, 1)
logx = np.repeat(logx, self.le.shape[0], axis=0)
x = logx + LEx
plt.plot(x, self.raw_samples, 'ro', alpha=0.5)
plt.plot(self.g, np.linspace(0, 255, 256))
if not os.path.exists('CapturedImages/'):
os.mkdir('CapturedImages/')
if not os.path.exists('CapturedImages/sequenceImages/'):
os.mkdir('CapturedImages/sequenceImages/')
if not os.path.exists('CapturedImages/sequenceImages/undistortRadioCalib/'):
os.mkdir('CapturedImages/sequenceImages/undistortRadioCalib/')
if not os.path.exists('CapturedImages/sequenceImages/undistortRadioCalib/radioCalibResults/'):
os.mkdir('CapturedImages/sequenceImages/undistortRadioCalib/radioCalibResults/')
fig.savefig('CapturedImages/sequenceImages/undistortRadioCalib/radioCalibResults/Camera response.png')
print('Camera Response plot successful! Plot viewable at CapturedImages/sequenceImages/undistortRadioCalib/radioCalibResults')
# plt.show()
def get_camera_response(self, smoothness):
"""
Some explanation for solving g:
Given a set of pixel values observed for several pixels in several
images with different exposure times, this function returns the
imaging system's response function g as well as the log film irradiance
values for the observed pixels.
Assumes:
Zmin = 0
Zmax = 255
Arguments:
        self.raw_samples - Z(i, j) is the pixel value of pixel location number i in image j
        self.exposures - B(j) is the log delta t, or log shutter speed, for image j
        l is the lambda, the constant that determines the amount of smoothness
w(z) is the weighting function value for pixel value z
Returns:
g(z) is the log exposure corresponding to pixel value z
lE(i) is the log film irradiance at pixel location i
"""
# Load raw data
self.load_raw_data()
        Z = self.raw_samples.astype(int)  # np.int was removed from recent NumPy releases
# Convert exposure to log exposure
B = np.log(self.exposures*(10**-6))
# Next is to calculate g function #
n = 256
# Create weighting function - hat like
"""
self.w = np.ones([256, 1])
for i in range(128):
self.w[i] = i + 1
for i in range(128, 255):
self.w[i] = 256 - i
"""
self.w = np.ones((n, 1)) / n
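        # (Editor's note) The hat-shaped weighting above is kept only as a commented-out
        # alternative; the live code uses a uniform weight of 1/n for every pixel value.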
m = Z.shape[0]
p = Z.shape[1]
A = np.zeros((m * p + n + 1, n + m))
b = np.zeros((A.shape[0], 1))
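        # (Editor's note) The linear system stacks one row per (pixel, image) data-fitting
        # equation, one row pinning g(128) = 0, and n-2 second-difference smoothness rows;
        # the unknowns are the 256 values of g followed by the m log irradiances.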
k = 0
# Data fitting equations
for i in range(m):
for j in range(p):
wij = self.w[Z[i, j]]
A[k, Z[i, j]] = wij
A[k, n + i] = -wij
b[k, 0] = wij * B[j]
k += 1
# Fix the curve by setting its middle value to 0
A[k, 128] = 1
k = k + 1
# Include smoothness equations
for i in range(n - 2):
A[k, i] = smoothness * self.w[i + 1]
A[k, i + 1] = -2 * smoothness * self.w[i + 1]
A[k, i + 2] = smoothness * self.w[i + 1]
k = k + 1
# Solve the system using SVD
x = np.linalg.lstsq(A, b, rcond=None)
x = x[0]
self.g = x[0:n]
lE = x[n:x.shape[0]]
self.le = lE.squeeze()
# Save g function, exposures, etc for loading
np.savez('CalibrationNumpyData/radiometric.npz', g_function=self.g, log_exposures=self.le[::10],
w_function=self.w, exposures=self.exposures, samples=self.raw_samples[::10, :])
return self.g, self.le
def get_HDR_image(self, images=None, exposures=None):
# If images is None, take radiometric calibration images
if images is None:
if self.raw_data is None:
self.load_raw_data()
images = self.raw_data
        # Otherwise use the images supplied by the caller as-is
# If g function is None, load calibration
if self.g is None:
self.load_calibration_data()
# Override exposure values
if exposures is not None:
self.exposures = exposures
# Compute log exposure image
# Initialize flatten
size = (int(self.height * 1), int(self.width * 1))
EE = np.zeros([size[0] * size[1], 1])
sumw = np.zeros([size[0] * size[1], 1], int)
# Convert exposure from microseconds to seconds
exp_sec = self.exposures * (10 ** -6)
num_exp = self.exposures.shape[0]
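        # (Editor's note) Accumulate w(Z) * (g(Z) - ln(exposure)) over all exposures; dividing
        # by the summed weights below yields the per-pixel log irradiance (Debevec-style HDR merge).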
for i in range(num_exp):
t = images[i].flatten()
EE = EE + self.w[t] * (self.g[t] - np.log(exp_sec[i]))
#EE = EE + (self.g[t] - np.log(exp_sec[i]))
sumw = sumw + self.w[t]
# Reshape
lE = np.reshape(EE / sumw, size)
#lE = np.reshape(EE / num_exp, size)
# Take exponent to get exposure for each pixel
exposure_image = np.exp(lE)
return exposure_image
def calibrate_image(self, exposure, path):
# UNUSED
# Create list of calibrated images
images = []
# Exposure in microseconds -> convert to seconds
exp = exposure * (10 ** -6)
g = np.exp(self.g)
# Load images
imgFileList = self.readFileList(path)
# Idx
k = 0
# Iterate over images to be calibrated
for i in imgFileList:
# Read image and convert to grayscale if necessary
img = cv2.imread(i)
if img.shape[2] == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
# Applying the Debevec Algorithm
# Eq. 5 Debevec et. al.
calibrated_image = g[gray]
#calibrated_image = np.exp(calibrated_image - np.log(exp))
calibrated_image = calibrated_image - np.log(exp)
#calibrated_image *= 255.0 / calibrated_image.max()
images.append(calibrated_image)
k += 1
# Normalize by last captured image, which represents the object lit by a constant (255) illumination pattern
illuminated_radiance = images.pop()
for r in range(len(images)):
n_img = images[r]/illuminated_radiance
#n_img = images[r]
# Gamma correction
#n_img = (n_img - np.min(n_img)) / (np.max(n_img) - np.min(n_img))
n_img = self.apply_gamma_curve(n_img, gamma=0.4)
cv2.imwrite(path + '/RadianceMaps/capture' + str(r) + '.PNG', n_img*255)
np.save(path + '/RadianceMaps/capture_' + str(r) + '.npy', n_img)
images[r] = n_img
return images, g
@staticmethod
def scaleBrightness(E):
# Unused
"""
Brightness scaling function, which will scale the values on the radiance map to between 0 and 1
Args:
        E: An m*n*3 array. m*n is the size of your radiance map, and 3 represents R, G and B channel. It is your plotted Radiance map (don't forget to use the np.exp function to get it back from the logarithm of radiance!)
        Returns:
        ENormMap: An m*n*3 array. Normalized radiance map, whose values should be between 0 and 1
"""
res = np.zeros(E.shape)
for c in range(E.shape[2]):
res[:, :, c] = (E[:, :, c] - np.min(E[:, :, c])) / (np.max(E[:, :, c]) - np.min(E[:, :, c]))
return res
@staticmethod
def apply_gamma_curve(E, gamma=0.4):
# Unused
"""
        Apply gamma to the curve by raising E to the power gamma.
        Args:
        E: An m*n*3 array. m*n is the size of your radiance map, and 3 represents R, G and B channel. It is your plotted Radiance map (don't forget to use the np.exp function to get it back from the logarithm of radiance!)
gamma: a float value that is representative of the power to raise all E to.
Returns:
E_gamma: E modified by raising it to gamma.
"""
return E ** gamma
@staticmethod
def readFileList(imgFolder, ImgPattern="*.PNG"):
imgFileList = glob.glob(os.path.join(imgFolder, ImgPattern))
imgFileList.sort()
print(imgFileList)
return imgFileList
| 42.188854
| 218
| 0.589565
|
2463735ede96b66f77679e993ba5bd55450577ed
| 2,626
|
py
|
Python
|
src/common.py
|
Antoinehoff/Project_II
|
120209e695f4f25ecdc6797f683e2b23894689f4
|
[
"MIT"
] | null | null | null |
src/common.py
|
Antoinehoff/Project_II
|
120209e695f4f25ecdc6797f683e2b23894689f4
|
[
"MIT"
] | null | null | null |
src/common.py
|
Antoinehoff/Project_II
|
120209e695f4f25ecdc6797f683e2b23894689f4
|
[
"MIT"
] | null | null | null |
from enum import Enum
class FilterType(Enum):
"""
Filter type (filter densities, or gradients only)
"""
NoFilter = 0
Density = 1
Sensitivity = 2
class InterpolationType(Enum):
"""
Material interpolation scheme: classic SIMP, or Pedersen (for self-weight problems)
"""
SIMP = 1
Pedersen = 2
class ProblemType(Enum):
"""
Problem type. Minimize appearance only, minimize compliance only, or minimize
appearance with a compliance constraint.
"""
Appearance = 1
Compliance = 2
AppearanceWithMaxCompliance = 3
###Added by Antoine Hoffmann EPFL 2018
ComplianceWithSymmetry = 4
AppearanceWithMaxComplianceAndSymmetry = 5
def involves_symmetry(self):
"""
Returns true iff the given problem type has symmetry
"""
return self in (ProblemType.ComplianceWithSymmetry
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
######
def involves_appearance(self):
"""
Returns true iff the given problem type requires the appearance evaluation.
"""
return self in (ProblemType.Appearance, ProblemType.AppearanceWithMaxCompliance
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
def involves_compliance(self):
"""
        Returns true iff the given problem type requires the compliance evaluation.
"""
return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance
,ProblemType.ComplianceWithSymmetry
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
def involves_volume(self):
"""
Returns true iff the given problem type has a volume constraint.
"""
return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance
,ProblemType.ComplianceWithSymmetry
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
def has_compliance_constraint(self):
"""
Returns true iff the given problem has a constraint on the compliance.
"""
return self in (ProblemType.AppearanceWithMaxCompliance
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
def has_volume_constraint(self):
"""
Returns true iff the given problem has a constraint on the volume.
"""
return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance
,ProblemType.ComplianceWithSymmetry
,ProblemType.AppearanceWithMaxComplianceAndSymmetry)
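# (Editor's note, illustrative) For example, ProblemType.AppearanceWithMaxCompliance
# has involves_appearance() and has_compliance_constraint() True, while
# involves_symmetry() is False.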
| 32.825
| 87
| 0.664128
|
50ab83e8dd2f03fdb4b6e963f21dcdec0ad1c14d
| 2,329
|
py
|
Python
|
sistemadjango/settings.py
|
pauloupgrad/sistema-django
|
f1a7fc9f5602ec6eb1e3a777897ff94ee4b2e1ff
|
[
"MIT"
] | null | null | null |
sistemadjango/settings.py
|
pauloupgrad/sistema-django
|
f1a7fc9f5602ec6eb1e3a777897ff94ee4b2e1ff
|
[
"MIT"
] | null | null | null |
sistemadjango/settings.py
|
pauloupgrad/sistema-django
|
f1a7fc9f5602ec6eb1e3a777897ff94ee4b2e1ff
|
[
"MIT"
] | null | null | null |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'k6g)@@r-kwa+yz_8x4)d7c@$t!-j7n=m1yim#fi_%dn#ssp*=@'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sistemadjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sistemadjango.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
| 23.525253
| 91
| 0.668527
|
39a59208fd1d5090c76bd25fd41f379c787cf8ce
| 192
|
py
|
Python
|
v3/listing1.py
|
MrWater98/pymag-trees
|
9b4fba4fca09f7489f6cb1844d2db256377a4af9
|
[
"WTFPL"
] | 149
|
2015-01-18T14:26:41.000Z
|
2022-03-27T12:39:38.000Z
|
doc/v3/listing1.py
|
EvanED/treelayout
|
a1250381ccbab005890f58ac4bfc28e2a1933433
|
[
"WTFPL"
] | 5
|
2015-05-23T04:14:46.000Z
|
2021-11-04T14:19:06.000Z
|
doc/v3/listing1.py
|
EvanED/treelayout
|
a1250381ccbab005890f58ac4bfc28e2a1933433
|
[
"WTFPL"
] | 33
|
2015-06-06T04:38:55.000Z
|
2022-01-11T19:56:14.000Z
|
class DrawTree(object):
def __init__(self, tree, depth=0):
self.x = -1
self.y = depth
self.tree = tree
self.children = [DrawTree(t, depth+1) for t in tree]
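        # (Editor's note) `tree` is assumed to iterate over its child nodes; each child becomes
        # a DrawTree one level deeper, with x left at -1 for a later layout pass to fill in.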
| 27.428571
| 60
| 0.567708
|
a4ae733985a170d2fe19dbbb3e88c6e0708d692b
| 1,607
|
py
|
Python
|
listing/urls.py
|
natyz/Studdy-Buddy-Finder
|
84c50494f2696df2555d6d985534cdd4edbce791
|
[
"BSD-3-Clause"
] | null | null | null |
listing/urls.py
|
natyz/Studdy-Buddy-Finder
|
84c50494f2696df2555d6d985534cdd4edbce791
|
[
"BSD-3-Clause"
] | null | null | null |
listing/urls.py
|
natyz/Studdy-Buddy-Finder
|
84c50494f2696df2555d6d985534cdd4edbce791
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from . import views
app_name = 'listing'
urlpatterns = [
# path('listings', views.ListingsView.as_view(), name='listings'),
# path('listings', views.index, name='listings'),
path('listings', views.listings_view, name='listings'),
path('listings/table', views.listings_table_view, name='table'),
path('mylistings', views.users_listings_view, name='mylistings'),
path('listings/my', views.ListingsView.as_view(), name='my'),
# path('select', views.user_name, name='select'),
path('listings/create', views.new_listing_form, name='create'),
path('listings/<id>/delete', views.delete_listing, name='delete'),
path('listings/<id>/', views.detail_view, name='detail'),
path('listings/<id>/join', views.join_group, name='join'),
path('listings/<id>/leave', views.leave_group, name='leave'),
path('listings/<id>/join/zoom', views.join_zoom, name='zoom'),
path('listings/<id>/edit/', views.edit, name='edit'),
# path('listings/<id>/edit/', views.listing_update, name='edit'),
# path('listings/<id>/edit', views.edit_listing, name='edit'),
# path('listings/<int:id>/edit/', views.EditListingView.as_view(), name='edit'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 55.413793
| 98
| 0.581829
|
66dc7c7e7848c8b80685b428334555a2edbca43f
| 2,136
|
py
|
Python
|
functional-tests/test_full_checkout.py
|
cornelius/unit-e-clonemachine
|
e0bf1f3b49502a506c1edf9ea35101424008fa5d
|
[
"MIT"
] | 3
|
2019-04-18T06:44:30.000Z
|
2019-05-03T15:15:18.000Z
|
functional-tests/test_full_checkout.py
|
cornelius/unit-e-clonemachine
|
e0bf1f3b49502a506c1edf9ea35101424008fa5d
|
[
"MIT"
] | 2
|
2019-05-03T15:15:22.000Z
|
2019-05-17T09:28:18.000Z
|
functional-tests/test_full_checkout.py
|
cornelius/unit-e-clonemachine
|
e0bf1f3b49502a506c1edf9ea35101424008fa5d
|
[
"MIT"
] | 4
|
2019-04-17T18:05:22.000Z
|
2019-11-01T19:57:51.000Z
|
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/licenses/MIT.
# Functional test for clonemachine using a full checkout of the unit-e repo
# and a full fetched remote of the bitcoin repo
#
# Run it with `pytest -v test_full_checkout.py`
import pytest
import tempfile
import os
import subprocess
from pathlib import Path
import yaml
from runner import Runner
@pytest.fixture
def runner():
"""Set up git checkout for test and return a runner to run operations
on it.
"""
runner = Runner("unit-e")
runner.checkout_unit_e_clone()
runner.fetch_bitcoin()
return runner
def test_appropriation(runner):
runner.run_clonemachine()
# Check that one of the appropriated files is identical to the unit-e version
diff = runner.run_git(["diff", "master", "--", "CONTRIBUTING.md"])
assert diff == ""
# Check file list, assuming the latest commit is the appropriating commit
appropriated_files = runner.run_git(["diff-tree", "--name-only", "--no-commit-id", "-r", "HEAD"])
expected_files = """CONTRIBUTING.md
README.md
contrib/devtools/copyright_header.py
contrib/gitian-build.py
contrib/gitian-keys/keys.txt
doc/developer-notes.md
doc/gitian-building.md"""
assert appropriated_files == expected_files
# Check that commit message contains the revision of the appropriated files
unite_master_git_revision = runner.get_git_revision("master")
commit_msg = runner.run_git(["log", "-1", "--pretty=%B"])
assert "revision: " + unite_master_git_revision in commit_msg
def test_remove_files(runner):
files_to_be_removed = [".github/ISSUE_TEMPLATE.md", "contrib/gitian-descriptors/gitian-osx-signer.yml"]
for file in files_to_be_removed:
assert os.path.isfile(runner.git_dir / file)
result = runner.run_clonemachine()
with Path(os.path.dirname(__file__), "tmp", "clonemachine.log").open("w") as file:
file.write(result.stdout.decode('utf-8'))
for file in files_to_be_removed:
assert not os.path.isfile(runner.git_dir / file)
| 33.904762
| 107
| 0.727528
|
49121b29b8558d33d519a277f173bc2004b7a034
| 5,295
|
py
|
Python
|
operator-pipeline-images/operatorcert/entrypoints/index.py
|
Lawrence-Luo0008/operator-pipelines
|
61b0e970d377f142ca4249e1021bf01894f36f1f
|
[
"Apache-2.0"
] | null | null | null |
operator-pipeline-images/operatorcert/entrypoints/index.py
|
Lawrence-Luo0008/operator-pipelines
|
61b0e970d377f142ca4249e1021bf01894f36f1f
|
[
"Apache-2.0"
] | null | null | null |
operator-pipeline-images/operatorcert/entrypoints/index.py
|
Lawrence-Luo0008/operator-pipelines
|
61b0e970d377f142ca4249e1021bf01894f36f1f
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
import os
import time
from datetime import datetime, timedelta
from typing import Any, List
from operatorcert import iib, utils
from operatorcert.logger import setup_logger
LOGGER = logging.getLogger("operator-cert")
def setup_argparser() -> argparse.ArgumentParser: # pragma: no cover
"""
Setup argument parser
Returns:
Any: Initialized argument parser
"""
parser = argparse.ArgumentParser(description="Publish bundle to index image")
parser.add_argument(
"--bundle-pullspec", required=True, help="Operator bundle pullspec"
)
parser.add_argument(
"--from-index", required=True, help="Base index pullspec (without tag)"
)
parser.add_argument(
"--indices",
required=True,
nargs="+",
help="List of indices the bundle supports, e.g --indices registry/index:v4.9 registry/index:v4.8",
)
parser.add_argument(
"--iib-url",
default="https://iib.engineering.redhat.com",
help="Base URL for IIB API",
)
parser.add_argument("--verbose", action="store_true", help="Verbose output")
return parser
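# (Editor's note, illustrative only - the pullspecs below are made up) Typical invocation:
#   index.py --bundle-pullspec quay.io/org/bundle@sha256:... \
#            --from-index registry/index --indices registry/index:v4.9 registry/index:v4.8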
def wait_for_results(iib_url: str, batch_id: int, timeout=60 * 60, delay=20) -> Any:
"""
    Wait for the IIB build until it finishes
    Args:
        iib_url (str): Base URL of the IIB API
        batch_id (int): IIB batch identifier
        timeout (int, optional): Maximum wait time in seconds. Defaults to 60*60 (one hour)
        delay (int, optional): Delay between build polls in seconds. Defaults to 20.
Returns:
Any: Build response
"""
start_time = datetime.now()
loop = True
while loop:
response = iib.get_builds(iib_url, batch_id)
builds = response["items"]
# all builds have completed
if all([build.get("state") == "complete" for build in builds]):
LOGGER.info(f"IIB batch build completed successfully: {batch_id}")
return response
# any have failed
elif any([build.get("state") == "failed" for build in builds]):
for build in builds:
LOGGER.error(f"IIB build failed: {build['id']}")
state_history = build.get("state_history", [])
if state_history:
reason = state_history[0].get("state_reason")
LOGGER.info(f"Reason: {reason}")
return response
LOGGER.debug(f"Waiting for IIB batch build: {batch_id}")
LOGGER.debug("Current states [build id - state]:")
for build in builds:
LOGGER.debug(f"{build['id']} - {build['state']}")
if datetime.now() - start_time > timedelta(seconds=timeout):
LOGGER.error(f"Timeout: Waiting for IIB batch build failed: {batch_id}.")
break
LOGGER.info(f"Waiting for IIB batch build to finish: {batch_id}")
time.sleep(delay)
return None
def publish_bundle(
from_index: str, bundle_pullspec: str, iib_url: str, index_versions: List[str]
) -> None:
"""
Publish a bundle to index image using IIB
Args:
iib_url: url of IIB instance
bundle_pullspec: bundle pullspec
from_index: target index pullspec
index_versions: list of index versions (tags)
Raises:
Exception: Exception is raised when IIB build fails
"""
user = os.getenv("QUAY_USER")
token = os.getenv("QUAY_TOKEN")
payload = {"build_requests": []}
for version in index_versions:
payload["build_requests"].append(
{
"from_index": f"{from_index}:{version}",
"bundles": [bundle_pullspec],
"overwrite_from_index": True,
"add_arches": ["amd64", "s390x", "ppc64le"],
"overwrite_from_index_token": f"{user}:{token}",
}
)
resp = iib.add_builds(iib_url, payload)
batch_id = resp[0]["batch"]
response = wait_for_results(iib_url, batch_id)
if response is None or not all(
[build.get("state") == "complete" for build in response["items"]]
):
raise Exception("IIB build failed")
def parse_indices(indices: List[str]) -> List[str]:
"""
Parses a list of indices and returns only the versions,
e.g [registry/index:v4.9, registry/index:v4.8] -> [v4.9, v4.8]
Args:
indices: List of indices
Returns:
Parsed list of versions
"""
versions = []
for index in indices:
# split by : from right and get the rightmost result
split = index.rsplit(":", 1)
if len(split) == 1:
# unable to split by :
raise Exception(f"Unable to extract version from index {index}")
else:
versions.append(split[1])
return versions
def main() -> None: # pragma: no cover
"""
Main function
"""
parser = setup_argparser()
args = parser.parse_args()
log_level = "INFO"
if args.verbose:
log_level = "DEBUG"
setup_logger(level=log_level)
utils.set_client_keytab(os.environ.get("KRB_KEYTAB_FILE", "/etc/krb5.krb"))
publish_bundle(
args.from_index, args.bundle_pullspec, args.iib_url, parse_indices(args.indices)
)
if __name__ == "__main__": # pragma: no cover
main()
| 29.915254
| 106
| 0.611331
|
11a009a6b8dc954dcf0a2d323e6e9528897d984a
| 626
|
py
|
Python
|
main.py
|
caffe-mocha/pytorch-wgan-gp
|
570496f092c37629c872c528737ecdec12b0537b
|
[
"MIT"
] | 1
|
2021-01-11T14:42:20.000Z
|
2021-01-11T14:42:20.000Z
|
main.py
|
caffe-mocha/pytorch-wgan-gp
|
570496f092c37629c872c528737ecdec12b0537b
|
[
"MIT"
] | null | null | null |
main.py
|
caffe-mocha/pytorch-wgan-gp
|
570496f092c37629c872c528737ecdec12b0537b
|
[
"MIT"
] | null | null | null |
from utils.config import parse_args
from utils.data_loader import get_data_loader
from model.dcgan import DCGAN
def main(args):
model = None
if args.model == 'dcgan':
model = DCGAN(args)
else:
print("Model type non-existing. Try again.")
exit(-1)
print('----------------- configuration -----------------')
for k, v in vars(args).items():
print(' {}: {}'.format(k, v))
print('-------------------------------------------------')
data_loader = get_data_loader(args)
model.train(data_loader)
if __name__ == '__main__':
args = parse_args()
main(args)
| 23.185185
| 62
| 0.536741
|
81be118feda1bab7b5117a5de29e19e31669d826
| 690
|
py
|
Python
|
tests/test_deprecation.py
|
ppanero/flask-limiter
|
129bd922948f843518429190e915c5ebe4fec51f
|
[
"MIT"
] | 1
|
2019-08-30T15:28:58.000Z
|
2019-08-30T15:28:58.000Z
|
tests/test_deprecation.py
|
ppanero/flask-limiter
|
129bd922948f843518429190e915c5ebe4fec51f
|
[
"MIT"
] | null | null | null |
tests/test_deprecation.py
|
ppanero/flask-limiter
|
129bd922948f843518429190e915c5ebe4fec51f
|
[
"MIT"
] | null | null | null |
"""
"""
import unittest
import warnings
class DeprecationTests(unittest.TestCase):
def test_insecure_setup(self):
with warnings.catch_warnings(record=True) as w:
from flask import Flask
from flask_limiter import Limiter
app = Flask(__name__)
Limiter(app)
self.assertEqual(len(w), 1)
def test_with_global_limits(self):
with warnings.catch_warnings(record=True) as w:
from flask import Flask
from flask_limiter import Limiter
app = Flask(__name__)
Limiter(app, key_func=lambda x: 'test', global_limits=['1/second'])
self.assertEqual(len(w), 1)
| 28.75
| 79
| 0.623188
|
deaa9581e0bedb01283824a8d2699f480d7078e3
| 3,775
|
py
|
Python
|
awx/main/tests/functional/api/test_pagination.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/tests/functional/api/test_pagination.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/tests/functional/api/test_pagination.py
|
TinLe/awx
|
73d8c12e3bf5b193305ed1202549331ea00088c1
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
import pytest
import json
from unittest.mock import patch
from urllib.parse import urlencode
from awx.main.models.inventory import Group, Host
from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.api.pagination import Pagination
from awx.api.versioning import reverse
@pytest.fixture
def host(inventory):
def handler(name, groups):
h = Host(name=name, inventory=inventory)
h.save()
h = Host.objects.get(name=name, inventory=inventory)
for g in groups:
h.groups.add(g)
h.save()
h = Host.objects.get(name=name, inventory=inventory)
return h
return handler
@pytest.fixture
def group(inventory):
def handler(name):
g = Group(name=name, inventory=inventory)
g.save()
g = Group.objects.get(name=name, inventory=inventory)
return g
return handler
@pytest.mark.django_db
def test_pagination_backend_output_correct_total_count(group, host):
# NOTE: this test might not be db-backend-agnostic. Manual tests might be needed also
g1 = group('pg_group1')
g2 = group('pg_group2')
host('pg_host1', [g1, g2])
queryset = Host.objects.filter(groups__name__in=('pg_group1', 'pg_group2')).distinct()
p = Pagination().django_paginator_class(queryset, 10)
p.page(1)
assert p.count == 1
@pytest.mark.django_db
def test_pagination_cap_page_size(get, admin, inventory):
for i in range(20):
Host(name='host-{}'.format(i), inventory=inventory).save()
def host_list_url(params):
request_qs = '?' + urlencode(params)
return reverse('api:host_list') + request_qs
with patch('awx.api.pagination.Pagination.max_page_size', 5):
resp = get(host_list_url({'page': '2', 'page_size': '10'}), user=admin)
jdata = json.loads(resp.content)
assert jdata['previous'] == host_list_url({'page': '1', 'page_size': '5'})
assert jdata['next'] == host_list_url({'page': '3', 'page_size': '5'})
class TestUnifiedJobEventPagination:
@pytest.fixture
def ad_hoc_command(self, ad_hoc_command_factory):
return ad_hoc_command_factory()
def _test_unified_job(self, get, admin, template, job_attribute, list_endpoint):
if isinstance(template, AdHocCommand):
job = template
else:
job = template.create_unified_job()
kwargs = {job_attribute: job.pk}
for i in range(20):
job.event_class.create_from_data(**kwargs).save()
url = reverse(f'api:{list_endpoint}', kwargs={'pk': job.pk}) + '?limit=7'
resp = get(url, user=admin, expect=200)
assert 'count' not in resp.data
assert 'next' not in resp.data
assert 'previous' not in resp.data
assert len(resp.data['results']) == 7
@pytest.mark.django_db
def test_job(self, get, admin, job_template):
self._test_unified_job(get, admin, job_template, 'job_id', 'job_job_events_list')
@pytest.mark.django_db
def test_project_update(self, get, admin, project):
self._test_unified_job(get, admin, project, 'project_update_id', 'project_update_events_list')
@pytest.mark.django_db
def test_inventory_update(self, get, admin, inventory_source):
self._test_unified_job(get, admin, inventory_source, 'inventory_update_id', 'inventory_update_events_list')
@pytest.mark.django_db
def test_system_job(self, get, admin, system_job_template):
self._test_unified_job(get, admin, system_job_template, 'system_job_id', 'system_job_events_list')
@pytest.mark.django_db
def test_adhoc_command(self, get, admin, ad_hoc_command):
self._test_unified_job(get, admin, ad_hoc_command, 'ad_hoc_command_id', 'ad_hoc_command_ad_hoc_command_events_list')
| 34.953704
| 124
| 0.684768
|
6945047baa0c8cec9b789a182d3e0a4924fd25ff
| 3,051
|
py
|
Python
|
lib/surface/compute/rolling_updates/pause.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/rolling_updates/pause.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/rolling_updates/pause.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""rolling-updates pause command."""
from googlecloudsdk.api_lib.compute import rolling_updates_util as updater_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
class Pause(base.Command):
"""Pauses an existing update."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument('update', help='Update id.')
# TODO(user): Support --async which does not wait for state transition.
def Run(self, args):
"""Run 'rolling-updates pause'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Raises:
HttpException: An http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing
the command.
"""
client = self.context['updater_api']
messages = self.context['updater_messages']
resources = self.context['updater_resources']
ref = resources.Parse(
args.update,
collection='replicapoolupdater.rollingUpdates')
request = messages.ReplicapoolupdaterRollingUpdatesPauseRequest(
project=ref.project,
zone=ref.zone,
rollingUpdate=ref.rollingUpdate)
try:
operation = client.rollingUpdates.Pause(request)
operation_ref = resources.Parse(
operation.name,
collection='replicapoolupdater.zoneOperations')
result = updater_util.WaitForOperation(
client, operation_ref, 'Pausing the update')
if result:
log.status.write('Paused [{0}].\n'.format(ref))
else:
raise exceptions.ToolException('could not pause [{0}]'.format(ref))
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(updater_util.GetError(error))
Pause.detailed_help = {
'brief': 'Pauses an existing update.',
'DESCRIPTION': """\
Pauses the update in state ROLLING_FORWARD, ROLLING_BACK or PAUSED \
(fails if the update is in any other state).
No-op if invoked in state PAUSED.
""",
}
| 35.068966
| 89
| 0.706981
|
8dfd4fec68cf0faa623a7573934e27b6d82c8764
| 767
|
py
|
Python
|
app/controller/__init__.py
|
MacosPrintes001/webservice-paem
|
fa992e4bda40eaae3b585cee2ad2b65685104cc3
|
[
"Apache-2.0"
] | null | null | null |
app/controller/__init__.py
|
MacosPrintes001/webservice-paem
|
fa992e4bda40eaae3b585cee2ad2b65685104cc3
|
[
"Apache-2.0"
] | null | null | null |
app/controller/__init__.py
|
MacosPrintes001/webservice-paem
|
fa992e4bda40eaae3b585cee2ad2b65685104cc3
|
[
"Apache-2.0"
] | null | null | null |
from ..model import app
from .usuario_controller import UsuarioController
from .discente_controller import DiscenteController
from .docente_controller import DocenteController
from .tecnico_controller import TecnicoController
from .portaria_controller import PortariaController
from .direcao_controller import DirecaoController
from .coordenacao_controller import CoordenacaoController
from .curso_controller import CursoController
from .campus_controller import CampusController
from .reserva_recurso_servidores_controller import ReservaRecursoServidoresController
from .solicitacao_acesso_controller import SolicitacaoAcessoController
from .acesso_permitido_controller import AcessoPermitidoController
from .recurso_campus_controller import RecursoCampusController
| 51.133333
| 85
| 0.907432
|
d7eb616aa921299dc4048d584f259139ae7e17c6
| 19,508
|
py
|
Python
|
hummingbot/strategy/perpetual_market_making/perpetual_market_making_config_map.py
|
rince83/hummingbot
|
9023822744202624fad276b326cc999b72048d67
|
[
"Apache-2.0"
] | 4
|
2021-12-03T10:40:57.000Z
|
2022-03-28T10:32:48.000Z
|
hummingbot/strategy/perpetual_market_making/perpetual_market_making_config_map.py
|
rince83/hummingbot
|
9023822744202624fad276b326cc999b72048d67
|
[
"Apache-2.0"
] | null | null | null |
hummingbot/strategy/perpetual_market_making/perpetual_market_making_config_map.py
|
rince83/hummingbot
|
9023822744202624fad276b326cc999b72048d67
|
[
"Apache-2.0"
] | 3
|
2021-11-29T10:05:37.000Z
|
2021-12-12T15:35:00.000Z
|
from decimal import Decimal
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_validators import (
validate_exchange,
validate_derivative,
validate_market_trading_pair,
validate_bool,
validate_decimal,
validate_int
)
from hummingbot.client.settings import (
required_exchanges,
EXAMPLE_PAIRS,
)
from hummingbot.client.config.config_helpers import (
parse_cvar_value
)
from typing import Optional
def maker_trading_pair_prompt():
derivative = perpetual_market_making_config_map.get("derivative").value
example = EXAMPLE_PAIRS.get(derivative)
return "Enter the token trading pair you would like to trade on %s%s >>> " \
% (derivative, f" (e.g. {example})" if example else "")
# strategy specific validators
def validate_derivative_trading_pair(value: str) -> Optional[str]:
derivative = perpetual_market_making_config_map.get("derivative").value
return validate_market_trading_pair(derivative, value)
def validate_derivative_position_mode(value: str) -> Optional[str]:
if value not in ["One-way", "Hedge"]:
return "Position mode can either be One-way or Hedge mode"
def order_amount_prompt() -> str:
trading_pair = perpetual_market_making_config_map["market"].value
base_asset, quote_asset = trading_pair.split("-")
return f"What is the amount of {base_asset} per order? >>> "
def validate_price_source(value: str) -> Optional[str]:
if value not in {"current_market", "external_market", "custom_api"}:
return "Invalid price source type."
def on_validate_price_source(value: str):
if value != "external_market":
perpetual_market_making_config_map["price_source_derivative"].value = None
perpetual_market_making_config_map["price_source_market"].value = None
perpetual_market_making_config_map["take_if_crossed"].value = None
if value != "custom_api":
perpetual_market_making_config_map["price_source_custom_api"].value = None
else:
perpetual_market_making_config_map["price_type"].value = "custom"
def validate_price_type(value: str) -> Optional[str]:
error = None
price_source = perpetual_market_making_config_map.get("price_source").value
if price_source != "custom_api":
valid_values = {"mid_price",
"last_price",
"last_own_trade_price",
"best_bid",
"best_ask"}
if value not in valid_values:
error = "Invalid price type."
elif value != "custom":
error = "Invalid price type."
return error
def price_source_market_prompt() -> str:
external_market = perpetual_market_making_config_map.get("price_source_derivative").value
return f'Enter the token trading pair on {external_market} >>> '
def validate_price_source_derivative(value: str) -> Optional[str]:
if value == perpetual_market_making_config_map.get("derivative").value:
return "Price source derivative cannot be the same as maker derivative."
if validate_derivative(value) is not None and validate_exchange(value) is not None:
return "Price must must be a valid exchange or derivative connector."
def on_validated_price_source_derivative(value: str):
if value is None:
perpetual_market_making_config_map["price_source_market"].value = None
def validate_price_source_market(value: str) -> Optional[str]:
market = perpetual_market_making_config_map.get("price_source_derivative").value
return validate_market_trading_pair(market, value)
def validate_price_floor_ceiling(value: str) -> Optional[str]:
try:
decimal_value = Decimal(value)
except Exception:
return f"{value} is not in decimal format."
if not (decimal_value == Decimal("-1") or decimal_value > Decimal("0")):
return "Value must be more than 0 or -1 to disable this feature."
def validate_take_if_crossed(value: str) -> Optional[str]:
err_msg = validate_bool(value)
if err_msg is not None:
return err_msg
price_source_enabled = perpetual_market_making_config_map["price_source_enabled"].value
take_if_crossed = parse_cvar_value(perpetual_market_making_config_map["take_if_crossed"], value)
if take_if_crossed and not price_source_enabled:
return "You can enable this feature only when external pricing source for mid-market price is used."
def derivative_on_validated(value: str):
required_exchanges.append(value)
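# (Editor's note) Each ConfigVar below pairs an interactive prompt with a validator;
# "required_if" callables make dependent settings mandatory only when the settings they
# depend on (for example an external price source) are actually in use.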
perpetual_market_making_config_map = {
"strategy":
ConfigVar(key="strategy",
prompt=None,
default="perpetual_market_making"),
"derivative":
ConfigVar(key="derivative",
prompt="Enter your maker derivative connector >>> ",
validator=validate_derivative,
on_validated=derivative_on_validated,
prompt_on_new=True),
"market":
ConfigVar(key="market",
prompt=maker_trading_pair_prompt,
validator=validate_derivative_trading_pair,
prompt_on_new=True),
"leverage":
ConfigVar(key="leverage",
prompt="How much leverage do you want to use? "
"(Binance Perpetual supports up to 75X for most pairs) >>> ",
type_str="int",
validator=lambda v: validate_int(v, min_value=0, inclusive=False),
prompt_on_new=True),
"position_mode":
ConfigVar(key="position_mode",
prompt="Which position mode do you want to use? (One-way/Hedge) >>> ",
validator=validate_derivative_position_mode,
type_str="str",
default="One-way",
prompt_on_new=True),
"bid_spread":
ConfigVar(key="bid_spread",
prompt="How far away from the mid price do you want to place the "
"first bid order? (Enter 1 to indicate 1%) >>> ",
type_str="decimal",
validator=lambda v: validate_decimal(v, 0, 100, inclusive=False),
prompt_on_new=True),
"ask_spread":
ConfigVar(key="ask_spread",
prompt="How far away from the mid price do you want to place the "
"first ask order? (Enter 1 to indicate 1%) >>> ",
type_str="decimal",
validator=lambda v: validate_decimal(v, 0, 100, inclusive=False),
prompt_on_new=True),
"minimum_spread":
ConfigVar(key="minimum_spread",
prompt="At what minimum spread should the bot automatically cancel orders? (Enter 1 for 1%) >>> ",
required_if=lambda: False,
type_str="decimal",
default=Decimal(-100),
validator=lambda v: validate_decimal(v, -100, 100, True)),
"order_refresh_time":
ConfigVar(key="order_refresh_time",
prompt="How often do you want to cancel and replace bids and asks "
"(in seconds)? >>> ",
type_str="float",
validator=lambda v: validate_decimal(v, 0, inclusive=False),
prompt_on_new=True),
"order_refresh_tolerance_pct":
ConfigVar(key="order_refresh_tolerance_pct",
prompt="Enter the percent change in price needed to refresh orders at each cycle "
"(Enter 1 to indicate 1%) >>> ",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, -10, 10, inclusive=True)),
"order_amount":
ConfigVar(key="order_amount",
prompt=order_amount_prompt,
type_str="decimal",
validator=lambda v: validate_decimal(v, min_value=Decimal("0"), inclusive=False),
prompt_on_new=True),
"position_management":
ConfigVar(key="position_management",
prompt="How would you like to manage your positions? (Profit_taking/Trailing_stop) >>> ",
type_str="str",
default="Profit_taking",
validator=lambda s: None if s in {"Profit_taking", "Trailing_stop"} else
"Invalid position management.",
prompt_on_new=True),
"long_profit_taking_spread":
ConfigVar(key="long_profit_taking_spread",
prompt="At what spread from the entry price do you want to place a short order to reduce position? (Enter 1 for 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("position_management").value == "Profit_taking",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, 0, 100, True),
prompt_on_new=True),
"short_profit_taking_spread":
ConfigVar(key="short_profit_taking_spread",
prompt="At what spread from the position entry price do you want to place a long order to reduce position? (Enter 1 for 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("position_management").value == "Profit_taking",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, 0, 100, True),
prompt_on_new=True),
"ts_activation_spread":
ConfigVar(key="ts_activation_spread",
prompt="At what spread from the position entry price do you want the bot to start trailing? (Enter 1 for 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("position_management").value == "Trailing_stop",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, 0, 100, True),
prompt_on_new=True),
"ts_callback_rate":
ConfigVar(key="ts_callback_rate",
prompt="At what spread away from the trailing peak price do you want positions to remain open before they're closed? (Enter 1 for 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("position_management").value == "Trailing_stop",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, 0, 100, True),
prompt_on_new=True),
"stop_loss_spread":
ConfigVar(key="stop_loss_spread",
prompt="At what spread from position entry price do you want to place stop_loss order? (Enter 1 for 1%) >>> ",
type_str="decimal",
default=Decimal("0"),
validator=lambda v: validate_decimal(v, 0, 101, False),
prompt_on_new=True),
"close_position_order_type":
ConfigVar(key="close_position_order_type",
prompt="What order type do you want trailing stop and/or stop loss features to use for closing positions? (LIMIT/MARKET) >>> ",
type_str="str",
default="LIMIT",
validator=lambda s: None if s in {"LIMIT", "MARKET"} else
"Invalid order type.",
prompt_on_new=True),
"price_ceiling":
ConfigVar(key="price_ceiling",
prompt="Enter the price point above which only sell orders will be placed "
"(Enter -1 to deactivate this feature) >>> ",
type_str="decimal",
default=Decimal("-1"),
validator=validate_price_floor_ceiling),
"price_floor":
ConfigVar(key="price_floor",
prompt="Enter the price below which only buy orders will be placed "
"(Enter -1 to deactivate this feature) >>> ",
type_str="decimal",
default=Decimal("-1"),
validator=validate_price_floor_ceiling),
"ping_pong_enabled":
ConfigVar(key="ping_pong_enabled",
prompt="Would you like to use the ping pong feature and alternate between buy and sell orders after fills? (Yes/No) >>> ",
type_str="bool",
default=False,
validator=validate_bool),
"order_levels":
ConfigVar(key="order_levels",
prompt="How many orders do you want to place on both sides? >>> ",
type_str="int",
validator=lambda v: validate_int(v, min_value=0, inclusive=False),
default=1),
"order_level_amount":
ConfigVar(key="order_level_amount",
prompt="How much do you want to increase or decrease the order size for each "
"additional order? (decrease < 0 > increase) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("order_levels").value > 1,
type_str="decimal",
validator=lambda v: validate_decimal(v),
default=0),
"order_level_spread":
ConfigVar(key="order_level_spread",
prompt="Enter the price increments (as percentage) for subsequent "
"orders? (Enter 1 to indicate 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("order_levels").value > 1,
type_str="decimal",
validator=lambda v: validate_decimal(v, 0, 100, inclusive=False),
default=Decimal("1")),
"filled_order_delay":
ConfigVar(key="filled_order_delay",
prompt="How long do you want to wait before placing the next order "
"if your order gets filled (in seconds)? >>> ",
type_str="float",
validator=lambda v: validate_decimal(v, min_value=0, inclusive=False),
default=60),
"hanging_orders_enabled":
ConfigVar(key="hanging_orders_enabled",
prompt="Do you want to enable hanging orders? (Yes/No) >>> ",
type_str="bool",
default=False,
validator=validate_bool),
"hanging_orders_cancel_pct":
ConfigVar(key="hanging_orders_cancel_pct",
prompt="At what spread percentage (from mid price) will hanging orders be canceled? "
"(Enter 1 to indicate 1%) >>> ",
required_if=lambda: perpetual_market_making_config_map.get("hanging_orders_enabled").value,
type_str="decimal",
default=Decimal("10"),
validator=lambda v: validate_decimal(v, 0, 100, inclusive=False)),
"order_optimization_enabled":
ConfigVar(key="order_optimization_enabled",
prompt="Do you want to enable best bid ask jumping? (Yes/No) >>> ",
type_str="bool",
default=False,
validator=validate_bool),
"ask_order_optimization_depth":
ConfigVar(key="ask_order_optimization_depth",
prompt="How deep do you want to go into the order book for calculating "
"the top ask, ignoring dust orders on the top "
"(expressed in base asset amount)? >>> ",
required_if=lambda: perpetual_market_making_config_map.get("order_optimization_enabled").value,
type_str="decimal",
validator=lambda v: validate_decimal(v, min_value=0),
default=0),
"bid_order_optimization_depth":
ConfigVar(key="bid_order_optimization_depth",
prompt="How deep do you want to go into the order book for calculating "
"the top bid, ignoring dust orders on the top "
"(expressed in base asset amount)? >>> ",
required_if=lambda: perpetual_market_making_config_map.get("order_optimization_enabled").value,
type_str="decimal",
validator=lambda v: validate_decimal(v, min_value=0),
default=0),
"add_transaction_costs":
ConfigVar(key="add_transaction_costs",
prompt="Do you want to add transaction costs automatically to order prices? (Yes/No) >>> ",
type_str="bool",
default=False,
validator=validate_bool),
"price_source":
ConfigVar(key="price_source",
prompt="Which price source to use? (current_market/external_market/custom_api) >>> ",
type_str="str",
default="current_market",
validator=validate_price_source,
on_validated=on_validate_price_source),
"price_type":
ConfigVar(key="price_type",
prompt="Which price type to use? (mid_price/last_price/last_own_trade_price/best_bid/best_ask) >>> ",
type_str="str",
required_if=lambda: perpetual_market_making_config_map.get("price_source").value != "custom_api",
default="mid_price",
validator=validate_price_type),
"price_source_derivative":
ConfigVar(key="price_source_derivative",
prompt="Enter external price source connector name or derivative name >>> ",
required_if=lambda: perpetual_market_making_config_map.get("price_source").value == "external_market",
type_str="str",
validator=validate_price_source_derivative,
on_validated=on_validated_price_source_derivative),
"price_source_market":
ConfigVar(key="price_source_market",
prompt=price_source_market_prompt,
required_if=lambda: perpetual_market_making_config_map.get("price_source").value == "external_market",
type_str="str",
validator=validate_price_source_market),
"take_if_crossed":
ConfigVar(key="take_if_crossed",
prompt="Do you want to take the best order if orders cross the orderbook? (Yes/No) >>> ",
required_if=lambda: perpetual_market_making_config_map.get(
"price_source").value == "external_market",
type_str="bool",
validator=validate_bool),
"price_source_custom_api":
ConfigVar(key="price_source_custom_api",
prompt="Enter pricing API URL >>> ",
required_if=lambda: perpetual_market_making_config_map.get("price_source").value == "custom_api",
type_str="str"),
"custom_api_update_interval":
ConfigVar(key="custom_api_update_interval",
prompt="Enter custom API update interval in second (default: 5.0, min: 0.5) >>> ",
required_if=lambda: False,
default=float(5),
type_str="float",
validator=lambda v: validate_decimal(v, Decimal("0.5"))),
"order_override":
ConfigVar(key="order_override",
prompt=None,
required_if=lambda: False,
default=None,
type_str="json"),
}
| 49.262626
| 158
| 0.604726
|
f01a444d8c9edd7eeb5b483e2e12ba5a74028e11
| 1,223
|
py
|
Python
|
collision_metric/planning/BFS.py
|
Jan-Blaha/pedestrian-collision-metric
|
06863161e3a12e52a78c1bf4df0439b3f90daef6
|
[
"MIT"
] | null | null | null |
collision_metric/planning/BFS.py
|
Jan-Blaha/pedestrian-collision-metric
|
06863161e3a12e52a78c1bf4df0439b3f90daef6
|
[
"MIT"
] | null | null | null |
collision_metric/planning/BFS.py
|
Jan-Blaha/pedestrian-collision-metric
|
06863161e3a12e52a78c1bf4df0439b3f90daef6
|
[
"MIT"
] | null | null | null |
import heapq
from collision_metric.state_spaces.StateSpace import StateSpace
class BFS:
space = None # type: StateSpace
def __init__(self, state_space=None):
        if state_space is None or not issubclass(type(state_space), StateSpace):
raise Exception("No or invalid state space specified.")
self.space = state_space
def set_data(self, data):
self.space.set_data(data)
# MAIN SEARCH ALGORITHM
def search(self, source, destination):
queue = []
heapq.heapify(queue)
start = self.space.get_starting_state(source)
dest = self.space.set_destination_state(destination)
heapq.heappush(queue, (0, 0, start))
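        # (Editor's note) Queue entries are (cumulative_cost, depth, state); heapq pops the
        # cheapest path first, so the search behaves like uniform-cost search despite the class name.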
self.space.mark_visited(start)
curr = start
while len(queue) > 0:
curr = heapq.heappop(queue)
if curr[2] == dest:
break
children = self.space.expand(curr[2])
for i in range(len(children)):
heapq.heappush(queue, (children[i].get_cost() + curr[0], curr[1] + 1, children[i]))
self.space.mark_visited(children[i])
return self.space.remake_path(curr[2])
| 27.795455
| 100
| 0.589534
|
666b521b42ecbb15675721aac1773d8c4ff1909b
| 13,906
|
py
|
Python
|
alipay/aop/api/request/AlipayOpenAgentMobilepaySignRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/AlipayOpenAgentMobilepaySignRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/AlipayOpenAgentMobilepaySignRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAgentMobilepaySignRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._app_market = None
self._app_name = None
self._app_status = None
self._app_test_account = None
self._app_test_account_password = None
self._app_type = None
self._batch_no = None
self._business_license_mobile = None
self._business_license_no = None
self._date_limitation = None
self._download_link = None
self._long_term = None
self._mcc_code = None
self._app_auth_pic = None
self._app_demo = None
self._business_license_auth_pic = None
self._business_license_pic = None
self._home_screenshot = None
self._in_app_screenshot = None
self._pay_screenshot = None
self._special_license_pic = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def app_market(self):
return self._app_market
@app_market.setter
def app_market(self, value):
if isinstance(value, list):
self._app_market = list()
for i in value:
self._app_market.append(i)
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_status(self):
return self._app_status
@app_status.setter
def app_status(self, value):
self._app_status = value
@property
def app_test_account(self):
return self._app_test_account
@app_test_account.setter
def app_test_account(self, value):
self._app_test_account = value
@property
def app_test_account_password(self):
return self._app_test_account_password
@app_test_account_password.setter
def app_test_account_password(self, value):
self._app_test_account_password = value
@property
def app_type(self):
return self._app_type
@app_type.setter
def app_type(self, value):
if isinstance(value, list):
self._app_type = list()
for i in value:
self._app_type.append(i)
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def business_license_mobile(self):
return self._business_license_mobile
@business_license_mobile.setter
def business_license_mobile(self, value):
self._business_license_mobile = value
@property
def business_license_no(self):
return self._business_license_no
@business_license_no.setter
def business_license_no(self, value):
self._business_license_no = value
@property
def date_limitation(self):
return self._date_limitation
@date_limitation.setter
def date_limitation(self, value):
self._date_limitation = value
@property
def download_link(self):
return self._download_link
@download_link.setter
def download_link(self, value):
self._download_link = value
@property
def long_term(self):
return self._long_term
@long_term.setter
def long_term(self, value):
self._long_term = value
@property
def mcc_code(self):
return self._mcc_code
@mcc_code.setter
def mcc_code(self, value):
self._mcc_code = value
@property
def app_auth_pic(self):
return self._app_auth_pic
@app_auth_pic.setter
def app_auth_pic(self, value):
if not isinstance(value, FileItem):
return
self._app_auth_pic = value
@property
def app_demo(self):
return self._app_demo
@app_demo.setter
def app_demo(self, value):
if not isinstance(value, FileItem):
return
self._app_demo = value
@property
def business_license_auth_pic(self):
return self._business_license_auth_pic
@business_license_auth_pic.setter
def business_license_auth_pic(self, value):
if not isinstance(value, FileItem):
return
self._business_license_auth_pic = value
@property
def business_license_pic(self):
return self._business_license_pic
@business_license_pic.setter
def business_license_pic(self, value):
if not isinstance(value, FileItem):
return
self._business_license_pic = value
@property
def home_screenshot(self):
return self._home_screenshot
@home_screenshot.setter
def home_screenshot(self, value):
if not isinstance(value, FileItem):
return
self._home_screenshot = value
@property
def in_app_screenshot(self):
return self._in_app_screenshot
@in_app_screenshot.setter
def in_app_screenshot(self, value):
if not isinstance(value, FileItem):
return
self._in_app_screenshot = value
@property
def pay_screenshot(self):
return self._pay_screenshot
@pay_screenshot.setter
def pay_screenshot(self, value):
if not isinstance(value, FileItem):
return
self._pay_screenshot = value
@property
def special_license_pic(self):
return self._special_license_pic
@special_license_pic.setter
def special_license_pic(self, value):
if not isinstance(value, FileItem):
return
self._special_license_pic = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.agent.mobilepay.sign'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.app_market:
if isinstance(self.app_market, list):
for i in range(0, len(self.app_market)):
element = self.app_market[i]
if hasattr(element, 'to_alipay_dict'):
self.app_market[i] = element.to_alipay_dict()
params['app_market'] = json.dumps(obj=self.app_market, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = json.dumps(obj=self.app_name.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_name'] = self.app_name
if self.app_status:
if hasattr(self.app_status, 'to_alipay_dict'):
params['app_status'] = json.dumps(obj=self.app_status.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_status'] = self.app_status
if self.app_test_account:
if hasattr(self.app_test_account, 'to_alipay_dict'):
params['app_test_account'] = json.dumps(obj=self.app_test_account.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_test_account'] = self.app_test_account
if self.app_test_account_password:
if hasattr(self.app_test_account_password, 'to_alipay_dict'):
params['app_test_account_password'] = json.dumps(obj=self.app_test_account_password.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['app_test_account_password'] = self.app_test_account_password
if self.app_type:
if isinstance(self.app_type, list):
for i in range(0, len(self.app_type)):
element = self.app_type[i]
if hasattr(element, 'to_alipay_dict'):
self.app_type[i] = element.to_alipay_dict()
params['app_type'] = json.dumps(obj=self.app_type, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.batch_no:
if hasattr(self.batch_no, 'to_alipay_dict'):
params['batch_no'] = json.dumps(obj=self.batch_no.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['batch_no'] = self.batch_no
if self.business_license_mobile:
if hasattr(self.business_license_mobile, 'to_alipay_dict'):
params['business_license_mobile'] = json.dumps(obj=self.business_license_mobile.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['business_license_mobile'] = self.business_license_mobile
if self.business_license_no:
if hasattr(self.business_license_no, 'to_alipay_dict'):
params['business_license_no'] = json.dumps(obj=self.business_license_no.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['business_license_no'] = self.business_license_no
if self.date_limitation:
if hasattr(self.date_limitation, 'to_alipay_dict'):
params['date_limitation'] = json.dumps(obj=self.date_limitation.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['date_limitation'] = self.date_limitation
if self.download_link:
if hasattr(self.download_link, 'to_alipay_dict'):
params['download_link'] = json.dumps(obj=self.download_link.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['download_link'] = self.download_link
if self.long_term:
if hasattr(self.long_term, 'to_alipay_dict'):
params['long_term'] = json.dumps(obj=self.long_term.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['long_term'] = self.long_term
if self.mcc_code:
if hasattr(self.mcc_code, 'to_alipay_dict'):
params['mcc_code'] = json.dumps(obj=self.mcc_code.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['mcc_code'] = self.mcc_code
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
if self.app_auth_pic:
multipart_params['app_auth_pic'] = self.app_auth_pic
if self.app_demo:
multipart_params['app_demo'] = self.app_demo
if self.business_license_auth_pic:
multipart_params['business_license_auth_pic'] = self.business_license_auth_pic
if self.business_license_pic:
multipart_params['business_license_pic'] = self.business_license_pic
if self.home_screenshot:
multipart_params['home_screenshot'] = self.home_screenshot
if self.in_app_screenshot:
multipart_params['in_app_screenshot'] = self.in_app_screenshot
if self.pay_screenshot:
multipart_params['pay_screenshot'] = self.pay_screenshot
if self.special_license_pic:
multipart_params['special_license_pic'] = self.special_license_pic
return multipart_params
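# --- Editor's illustrative sketch (not part of the generated SDK file) ---
# get_params() above serialises nested values with a compact, key-sorted JSON
# convention: json.dumps(..., ensure_ascii=False, sort_keys=True, separators=(',', ':')).
# The guarded demo below shows only that convention, on made-up data.
if __name__ == '__main__':
    import json as _json
    _example = {'app_name': 'Demo App', 'app_type': ['GAME', 'TOOL']}
    print(_json.dumps(_example, ensure_ascii=False, sort_keys=True, separators=(',', ':')))
    # -> {"app_name":"Demo App","app_type":["GAME","TOOL"]}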
| 34.506203
| 176
| 0.638717
|
19e719fc7eced7f4ea52d6e1caa493a0fd459527
| 5,302
|
py
|
Python
|
collection/plugins/modules/thola_read_interfaces_facts.py
|
inexio/thola-ansible
|
f07618d69873fe6fb81941baec522646397d2d54
|
[
"BSD-2-Clause"
] | 3
|
2021-05-28T09:05:53.000Z
|
2021-06-25T20:04:44.000Z
|
collection/plugins/modules/thola_read_interfaces_facts.py
|
inexio/thola-ansible
|
f07618d69873fe6fb81941baec522646397d2d54
|
[
"BSD-2-Clause"
] | null | null | null |
collection/plugins/modules/thola_read_interfaces_facts.py
|
inexio/thola-ansible
|
f07618d69873fe6fb81941baec522646397d2d54
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import sys
import urllib3
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = """
---
module: thola_read_interfaces_facts
author: "Thola team"
version_added: "1.0.5"
short_description: "Reads interfaces of a given device"
description:
- "Reads the interfaces of a given device with SNMP"
requirements:
- thola-client-module-python
options:
api_host:
description:
- Hostname of the running Thola API instance
required: True
host:
description:
- IP of the device you want to identify
required: True
community:
description:
- SNMP community of the device
version:
description:
- SNMP version that should be used to connect to the device
port:
description:
- The port you want Thola to connect to the device
discover_parallel_request:
description:
- Sets the number of possible parallel requests
discover_retries:
description:
- Sets the number of discovery retries
discover_timeout:
description:
- Sets the discover timeout
"""
EXAMPLES = """
- name: thola read interfaces
thola_read_interfaces_facts:
api_host: '{{ api_host }}'
host: '{{ host }}'
community: '{{ community }}'
version: '{{ version }}'
port: '{{ port }}'
discover_parallel_request: '{{ discover_parallel_request }}'
discover_retries: '{{ discover_retries }}'
discover_timeout: '{{ discover_timeout }}'
register: result
"""
RETURN = """
changed:
description: "whether the command has been executed on the device"
returned: always
type: bool
sample: True
thola_read_interfaces_facts:
description: "Interfaces facts"
returned: always
type: dict
"""
def change_quotation_marks(obj):
if isinstance(obj, dict):
for key, value in obj.items():
if isinstance(value, dict):
change_quotation_marks(value)
elif isinstance(value, str):
obj[key] = obj[key].replace("\"", "'")
else:
pass
return obj
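# Editor's note (illustrative, made-up data): change_quotation_marks() rewrites
# double quotes to single quotes in every string value of a nested dict, e.g.
#   change_quotation_marks({'descr': 'uplink "core"', 'sub': {'alias': '"mgmt"'}})
#   returns {'descr': "uplink 'core'", 'sub': {'alias': "'mgmt'"}};
# non-string, non-dict values are left untouched.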
thola_client_found = False
try:
import thola_client.api.read_api as read
import thola_client.rest as rest
import thola_client
thola_client_found = True
except ImportError:
pass
def main():
    sys.stderr = None  # discard stderr so stray warnings cannot corrupt the module's JSON output
module = AnsibleModule(
argument_spec=dict(
api_host=dict(type="str", required=True),
host=dict(type="str", required=True),
community=dict(type="str", required=False),
version=dict(type="str", required=False),
port=dict(type="int", required=False),
discover_parallel_request=dict(type="int", required=False),
discover_retries=dict(type="int", required=False),
discover_timeout=dict(type="int", required=False)
),
supports_check_mode=True,
)
if not thola_client_found:
module.fail_json("The thola-client-module is not installed")
host = module.params["host"]
api_host = module.params["api_host"]
argument_check = {"host": host, "api_host": api_host}
for key, val in argument_check.items():
if val is None:
module.fail_json(msg=str(key) + " is required")
return
if module.params["version"] is None:
version = "2c"
else:
version = module.params["version"]
if module.params["community"] is None:
community = "public"
else:
community = module.params["community"]
if module.params["port"] is None:
port = 161
else:
port = module.params["port"]
if module.params["discover_parallel_request"] is None:
discover_parallel_request = 5
else:
discover_parallel_request = module.params["discover_parallel_request"]
if module.params["discover_retries"] is None:
discover_retries = 0
else:
discover_retries = module.params["discover_retries"]
if module.params["discover_timeout"] is None:
discover_timeout = 2
else:
discover_timeout = module.params["discover_timeout"]
body = thola_client.ReadInterfacesRequest(
device_data=thola_client.DeviceData(
ip_address=host,
connection_data=thola_client.ConnectionData(
snmp=thola_client.SNMPConnectionData(
communities=[community],
versions=[version],
ports=[port],
discover_retries=discover_retries,
discover_timeout=discover_timeout,
discover_parallel_requests=discover_parallel_request
)
)
)
)
read_api = read.ReadApi()
read_api.api_client.configuration.host = api_host
try:
result_dict = read_api.read_interfaces(body=body).to_dict()
except rest.ApiException as e:
module.fail_json(**json.loads(e.body))
return
except urllib3.exceptions.MaxRetryError:
module.fail_json("Can't connect to Thola API!")
return
result_dict = change_quotation_marks(result_dict)
results = {"changed": False, "ansible_facts": result_dict}
module.exit_json(**results)
if __name__ == "__main__":
main()
| 28.972678
| 78
| 0.63146
|
4d3247d661fe96d8f522096ce584768901ef550e
| 3,340
|
py
|
Python
|
boto3/__init__.py
|
adamatan/boto3
|
4f2a12de5285d036cef6f61a9a8bbda05f7a761e
|
[
"Apache-2.0"
] | null | null | null |
boto3/__init__.py
|
adamatan/boto3
|
4f2a12de5285d036cef6f61a9a8bbda05f7a761e
|
[
"Apache-2.0"
] | null | null | null |
boto3/__init__.py
|
adamatan/boto3
|
4f2a12de5285d036cef6f61a9a8bbda05f7a761e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.9.195'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
constructor. There is no need to call this unless you wish to pass custom
parameters, because a default session will be created for you.
"""
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
By default, this logs all boto3 messages to ``stdout``.
>>> import boto3
>>> boto3.set_stream_logger('boto3.resources', logging.INFO)
For debugging purposes a good choice is to set the stream logger to ``''``
which is equivalent to saying "log everything".
.. WARNING::
Be aware that when logging anything from ``'botocore'`` the full wire
trace will appear in your logs. If your payloads contain sensitive data
this should not be used in production.
:type name: string
:param name: Log name
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type format_string: str
:param format_string: Log message format
"""
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
"""
Get the default session, creating one if needed.
:rtype: :py:class:`~boto3.session.Session`
:return: The default session
"""
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(*args, **kwargs):
"""
Create a low-level service client by name using the default session.
See :py:meth:`boto3.session.Session.client`.
"""
return _get_default_session().client(*args, **kwargs)
def resource(*args, **kwargs):
"""
Create a resource service client by name using the default session.
See :py:meth:`boto3.session.Session.resource`.
"""
return _get_default_session().resource(*args, **kwargs)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
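# --- Editor's illustrative sketch (not part of upstream boto3) ---
# Demonstrates the helpers defined above: enable INFO logging for the resource
# layer and obtain the lazily created default session. Neither call needs AWS
# credentials or network access.
if __name__ == '__main__':
    set_stream_logger('boto3.resources', logging.INFO)
    print(type(_get_default_session()).__name__)  # -> 'Session'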
| 30.09009
| 81
| 0.703593
|
3df668f29edabb50190a7b44daa2a191125be6d5
| 4,012
|
py
|
Python
|
SuperSafety/Utils/HistoryStructs.py
|
BDEvan5/SuperSafety
|
73edd8d8b191e291a6f369043698b8763887a1f7
|
[
"Apache-2.0"
] | null | null | null |
SuperSafety/Utils/HistoryStructs.py
|
BDEvan5/SuperSafety
|
73edd8d8b191e291a6f369043698b8763887a1f7
|
[
"Apache-2.0"
] | null | null | null |
SuperSafety/Utils/HistoryStructs.py
|
BDEvan5/SuperSafety
|
73edd8d8b191e291a6f369043698b8763887a1f7
|
[
"Apache-2.0"
] | null | null | null |
import os, shutil
import csv
import numpy as np
from matplotlib import pyplot as plt
SIZE = 20000
def plot_data(values, moving_avg_period=10, title="Results", figure_n=2):
plt.figure(figure_n)
plt.clf()
plt.title(title)
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(values)
moving_avg = moving_average(values, moving_avg_period)
plt.plot(moving_avg)
moving_avg = moving_average(values, moving_avg_period * 5)
plt.plot(moving_avg)
plt.pause(0.001)
def moving_average(data, period):
return np.convolve(data, np.ones(period), 'same') / period
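# Editor's note (illustrative): moving_average() is a convolution-based smoother.
# numpy's 'same' mode zero-pads the edges, so boundary values are biased low, e.g.
#   moving_average(np.array([1, 2, 3, 4, 5]), 3) -> array([1., 2., 3., 4., 3.])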
class TrainHistory():
def __init__(self, agent_name, conf, load=False) -> None:
self.agent_name = agent_name
self.path = conf.vehicle_path + self.agent_name
# training data
self.ptr = 0
self.lengths = np.zeros(SIZE)
self.rewards = np.zeros(SIZE)
self.t_counter = 0 # total steps
self.step_rewards = []
# espisode data
self.ep_counter = 0 # ep steps
self.ep_reward = 0
self.ep_rewards = []
if not load:
self.init_file_struct()
def init_file_struct(self):
path = os.getcwd() +'/' + self.path
if os.path.exists(path):
            try:
                # os.rmdir only removes empty directories; fall back to
                # deleting the whole tree if the folder still has contents.
                os.rmdir(path)
            except OSError:
                shutil.rmtree(path)
os.mkdir(path)
def add_step_data(self, new_r):
self.ep_reward += new_r
self.ep_rewards.append(new_r)
self.ep_counter += 1
self.t_counter += 1
self.step_rewards.append(new_r)
def lap_done(self, show_reward=False):
self.lengths[self.ptr] = self.ep_counter
self.rewards[self.ptr] = self.ep_reward
# print(f"EP reward: {self.ep_reward:.2f}")
self.ptr += 1
if show_reward:
plt.figure(8)
plt.clf()
plt.plot(self.ep_rewards)
plt.plot(self.ep_rewards, 'x', markersize=10)
plt.title(f"Ep rewards: total: {self.ep_reward:.4f}")
plt.ylim([-1.1, 1.5])
plt.pause(0.0001)
self.ep_counter = 0
self.ep_reward = 0
self.ep_rewards = []
def print_update(self, plot_reward=True):
if self.ptr < 10:
return
mean10 = np.mean(self.rewards[self.ptr-10:self.ptr])
mean100 = np.mean(self.rewards[max(0, self.ptr-100):self.ptr])
# score = moving_average(self.rewards[self.ptr-100:self.ptr], 10)
print(f"Run: {self.t_counter} --> Moving10: {mean10:.2f} --> Moving100: {mean100:.2f} ")
if plot_reward:
# raise NotImplementedError
plot_data(self.rewards[0:self.ptr], figure_n=2)
def save_csv_data(self):
data = []
for i in range(len(self.rewards)):
data.append([i, self.rewards[i], self.lengths[i]])
full_name = self.path + '/training_data.csv'
with open(full_name, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerows(data)
data = []
for i in range(len(self.step_rewards)):
data.append([i, self.step_rewards[i]])
full_name = self.path + '/step_data.csv'
with open(full_name, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerows(data)
plot_data(self.rewards[0:self.ptr], figure_n=2)
plt.figure(2)
plt.savefig(self.path + "/training_rewards.png")
def moving_average(data, period):
return np.convolve(data, np.ones(period), 'same') / period
class RewardAnalyser:
def __init__(self) -> None:
self.rewards = []
self.t = 0
def add_reward(self, new_r):
self.rewards.append(new_r)
self.t += 1
def show_rewards(self, show=False):
plt.figure(6)
plt.plot(self.rewards, '-*')
plt.ylim([-1, 1])
plt.title('Reward History')
if show:
plt.show()
| 29.718519
| 97
| 0.579013
|
26d668fa2dd39a9128d13c49817fb75e09ce98ec
| 2,002
|
py
|
Python
|
autotest/gdrivers/eir.py
|
dtusk/gdal1
|
30dcdc1eccbca2331674f6421f1c5013807da609
|
[
"MIT"
] | 3
|
2017-01-12T10:18:56.000Z
|
2020-03-21T16:42:55.000Z
|
autotest/gdrivers/eir.py
|
ShinNoNoir/gdal-1.11.5-vs2015
|
5d544e176a4c11f9bcd12a0fe66f97fd157824e6
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/eir.py
|
ShinNoNoir/gdal-1.11.5-vs2015
|
5d544e176a4c11f9bcd12a0fe66f97fd157824e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test EIR driver
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Test a fake EIR dataset
def eir_1():
tst = gdaltest.GDALTest( 'EIR', 'fakeeir.hdr', 1, 1 )
return tst.testOpen()
gdaltest_list = [
eir_1 ]
if __name__ == '__main__':
gdaltest.setup_run( 'eir' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 34.517241
| 79
| 0.621878
|
af9bd09bef8c7698aaef8edcc412d994ff8ceafc
| 2,319
|
py
|
Python
|
python3/koans/about_asserts.py
|
mitulp236/python-koans
|
8b39cdc346576bbbd4a56c3ff1b90ed8ea070db4
|
[
"MIT"
] | null | null | null |
python3/koans/about_asserts.py
|
mitulp236/python-koans
|
8b39cdc346576bbbd4a56c3ff1b90ed8ea070db4
|
[
"MIT"
] | null | null | null |
python3/koans/about_asserts.py
|
mitulp236/python-koans
|
8b39cdc346576bbbd4a56c3ff1b90ed8ea070db4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(True) # This should be True
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(False, "This should be True -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(__, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against reality.
"""
expected_value = __
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = __
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert False
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
# For example, contemplate the text string "navel". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
#
# So "navel".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(__, "navel".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
| 29.35443
| 83
| 0.606296
|
9ba9486bd024752c03c2077eeffde9273a6497e8
| 28,972
|
py
|
Python
|
graalpython/lib-python/3/trace.py
|
muellren/graalpython
|
9104425805f1d38ad7a521c75e53798a3b79b4f0
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | 4
|
2018-07-06T12:18:06.000Z
|
2021-02-26T03:46:53.000Z
|
graalpython/lib-python/3/trace.py
|
muellren/graalpython
|
9104425805f1d38ad7a521c75e53798a3b79b4f0
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
graalpython/lib-python/3/trace.py
|
muellren/graalpython
|
9104425805f1d38ad7a521c75e53798a3b79b4f0
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | 1
|
2018-07-09T09:42:18.000Z
|
2018-07-09T09:42:18.000Z
|
#!/usr/bin/env python3
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
__all__ = ['Trace', 'CoverageResults']
import argparse
import linecache
import os
import re
import sys
import token
import tokenize
import inspect
import gc
import dis
import pickle
from time import monotonic as _time
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
self._dirs = [] if not dirs else [os.path.normpath(d)
for d in dirs]
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list.
if modulename in self._mods: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
for mod in self._mods:
# Need to take some care since ignoring
# "cmp" mustn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
if modulename.startswith(mod + '.'):
self._ignore[modulename] = 1
return 1
# Now check that filename isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
def _modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
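# Editor's note: e.g. _modname('/usr/lib/python3.8/trace.py') returns 'trace'.
# Only the basename is used, so package prefixes are lost (see _fullmodname below).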
def _fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
with open(self.infile, 'rb') as f:
counts, calledfuncs, callers = pickle.load(f)
self.update(self.__class__(counts, calledfuncs, callers))
except (OSError, EOFError, ValueError) as err:
print(("Skipping counts file %r: %s"
% (self.infile, err)), file=sys.stderr)
def is_ignored_filename(self, filename):
"""Return True if the filename does not refer to a file
we want to have reported.
"""
return filename.startswith('<') and filename.endswith('>')
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts:
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs:
calledfuncs[key] = 1
for key in other_callers:
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
Write the coverage results.
:param show_missing: Show lines that had no hits.
:param summary: Include coverage summary per module.
:param coverdir: If None, the results of each module are placed in its
directory, otherwise it is included in the directory
specified.
"""
if self.calledfuncs:
print()
print("functions called:")
calls = self.calledfuncs
for filename, modulename, funcname in sorted(calls):
print(("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname)))
if self.callers:
print()
print("calling relationships:")
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
in sorted(self.callers):
if pfile != lastfile:
print()
print("***", pfile, "***")
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print(" -->", cfile)
lastcfile = cfile
print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts:
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.items():
if self.is_ignored_filename(filename):
continue
if filename.endswith(".pyc"):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = _modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = _fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = _find_executable_linenos(filename)
else:
lnotab = {}
if lnotab:
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
with open(filename, 'rb') as fp:
encoding, _ = tokenize.detect_encoding(fp.readline)
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count, encoding)
if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
print("lines cov% module (path)")
for m in sorted(sums):
n_lines, percent, modulename, filename = sums[m]
print("%5d %3d%% %s (%s)" % sums[m])
if self.outfile:
# try and store counts and module info into self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except OSError as err:
print("Can't save counts files because %s" % err, file=sys.stderr)
def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w", encoding=encoding)
except OSError as err:
print(("trace: Could not open %r for writing: %s"
"- skipping" % (path, err)), file=sys.stderr)
return 0, 0
n_lines = 0
n_hits = 0
with outfile:
for lineno, line in enumerate(lines, 1):
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in line:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(line.expandtabs(8))
return n_hits, n_lines
def _find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def _find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = _find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if inspect.iscode(c):
# find another code object, so recurse into it
linenos.update(_find_lines(c, strs))
return linenos
def _find_strings(filename, encoding=None):
"""Return a dict of possible docstring positions.
    The dict maps line numbers to strings. There is an entry for each
    line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
with open(filename, encoding=encoding) as f:
tok = tokenize.generate_tokens(f.readline)
for ttype, tstr, start, end, line in tok:
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
return d
def _find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
with tokenize.open(filename) as f:
prog = f.read()
encoding = f.encoding
except OSError as err:
print(("Not printing coverage data for %r: %s"
% (filename, err)), file=sys.stderr)
return {}
code = compile(prog, filename, "exec")
strs = _find_strings(filename, encoding)
return _find_lines(code, strs)
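# Editor's note: the helpers above cooperate as
#   source --tokenize.open()/compile()--> code object --dis.findlinestarts()--> line numbers,
# minus the docstring-only lines collected by _find_strings(); write_results_file()
# later marks any of these executable lines that were never hit with ">>>>>>".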
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
`count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
        @param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = _Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = _time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
_settrace(self.globaltrace)
try:
exec(cmd, globals, locals)
finally:
if not self.donothing:
_unsettrace()
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = _modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(func) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX _modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = _modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print((" --- modulename: %s, funcname: %s"
% (modulename, code.co_name)))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='trace 2.0')
grp = parser.add_argument_group('Main options',
'One of these (or --report) must be given')
grp.add_argument('-c', '--count', action='store_true',
help='Count the number of times each line is executed and write '
'the counts to <module>.cover for each module executed, in '
'the module\'s directory. See also --coverdir, --file, '
'--no-report below.')
grp.add_argument('-t', '--trace', action='store_true',
help='Print each line to sys.stdout before it is executed')
grp.add_argument('-l', '--listfuncs', action='store_true',
help='Keep track of which functions are executed at least once '
'and write the results to sys.stdout after the program exits. '
'Cannot be specified alongside --trace or --count.')
grp.add_argument('-T', '--trackcalls', action='store_true',
help='Keep track of caller/called pairs and write the results to '
'sys.stdout after the program exits.')
grp = parser.add_argument_group('Modifiers')
_grp = grp.add_mutually_exclusive_group()
_grp.add_argument('-r', '--report', action='store_true',
help='Generate a report from a counts file; does not execute any '
'code. --file must specify the results file to read, which '
'must have been created in a previous run with --count '
'--file=FILE')
_grp.add_argument('-R', '--no-report', action='store_true',
help='Do not generate the coverage report files. '
'Useful if you want to accumulate over several runs.')
grp.add_argument('-f', '--file',
help='File to accumulate counts over several runs')
grp.add_argument('-C', '--coverdir',
help='Directory where the report files go. The coverage report '
'for <package>.<module> will be written to file '
'<dir>/<package>/<module>.cover')
grp.add_argument('-m', '--missing', action='store_true',
help='Annotate executable lines that were not executed with '
'">>>>>> "')
grp.add_argument('-s', '--summary', action='store_true',
help='Write a brief summary for each file to sys.stdout. '
'Can only be used with --count or --report')
grp.add_argument('-g', '--timing', action='store_true',
help='Prefix each line with the time since the program started. '
'Only used while tracing')
grp = parser.add_argument_group('Filters',
'Can be specified multiple times')
grp.add_argument('--ignore-module', action='append', default=[],
help='Ignore the given module(s) and its submodules'
'(if it is a package). Accepts comma separated list of '
'module names.')
grp.add_argument('--ignore-dir', action='append', default=[],
help='Ignore files in the given directory '
'(multiple directories can be joined by os.pathsep).')
parser.add_argument('filename', nargs='?',
help='file to run as main program')
parser.add_argument('arguments', nargs=argparse.REMAINDER,
help='arguments to the program')
opts = parser.parse_args()
if opts.ignore_dir:
rel_path = 'lib', 'python{0.major}.{0.minor}'.format(sys.version_info)
_prefix = os.path.join(sys.base_prefix, *rel_path)
_exec_prefix = os.path.join(sys.base_exec_prefix, *rel_path)
def parse_ignore_dir(s):
s = os.path.expanduser(os.path.expandvars(s))
s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix)
return os.path.normpath(s)
opts.ignore_module = [mod.strip()
for i in opts.ignore_module for mod in i.split(',')]
opts.ignore_dir = [parse_ignore_dir(s)
for i in opts.ignore_dir for s in i.split(os.pathsep)]
if opts.report:
if not opts.file:
parser.error('-r/--report requires -f/--file')
results = CoverageResults(infile=opts.file, outfile=opts.file)
return results.write_results(opts.missing, opts.summary, opts.coverdir)
if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]):
parser.error('must specify one of --trace, --count, --report, '
'--listfuncs, or --trackcalls')
if opts.listfuncs and (opts.count or opts.trace):
parser.error('cannot specify both --listfuncs and (--trace or --count)')
if opts.summary and not opts.count:
parser.error('--summary can only be used with --count or --report')
if opts.filename is None:
parser.error('filename is missing: required with the main options')
sys.argv = [opts.filename, *opts.arguments]
sys.path[0] = os.path.dirname(opts.filename)
t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs,
countcallers=opts.trackcalls, ignoremods=opts.ignore_module,
ignoredirs=opts.ignore_dir, infile=opts.file,
outfile=opts.file, timing=opts.timing)
try:
with open(opts.filename) as fp:
code = compile(fp.read(), opts.filename, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
'__file__': opts.filename,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
t.runctx(code, globs, globs)
except OSError as err:
sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not opts.no_report:
results.write_results(opts.missing, opts.summary, opts.coverdir)
if __name__=='__main__':
main()
| 38.993271
| 82
| 0.574451
|
7e4e8b41efd9e7ca58ca095311cca57b253b987c
| 31,278
|
py
|
Python
|
pdf_tool.py
|
carlovogel/pdf_tool
|
f8c02489b4baeb45406eeef2e005b917fa3a3f1f
|
[
"MIT"
] | null | null | null |
pdf_tool.py
|
carlovogel/pdf_tool
|
f8c02489b4baeb45406eeef2e005b917fa3a3f1f
|
[
"MIT"
] | null | null | null |
pdf_tool.py
|
carlovogel/pdf_tool
|
f8c02489b4baeb45406eeef2e005b917fa3a3f1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import subprocess
import re
from pathlib import Path
from PyQt5 import QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, Qt
class PdfTool(QtWidgets.QDialog):
"""Main Window containing the three tabs 'Compress', 'Split' and 'Merge'.
"""
def __init__(self):
super().__init__(parent=None)
self.setWindowTitle('Pdf Tool')
self.vertical_layout = QtWidgets.QVBoxLayout()
self.tab_widget = QtWidgets.QTabWidget()
self.tab_widget.addTab(TabCompress(), 'Compress')
self.tab_widget.addTab(TabSplit(), 'Split')
self.tab_widget.addTab(TabMerge(), 'Merge')
self.vertical_layout.addWidget(self.tab_widget)
self.setLayout(self.vertical_layout)
@staticmethod
def get_all_files(folder):
"""Returns a list of all pdf files existing in the given folder.
"""
file_list = []
for item in folder.iterdir():
if item.is_file() and item.suffix == '.pdf':
file_list.append(item)
return file_list
@staticmethod
def refresh_list_widget(file_list, widget):
"""Refresh the given list widget with the given list.
"""
file_list = list(dict.fromkeys(file_list))
widget.clear()
widget.addItems([str(file) for file in file_list])
@staticmethod
def remove_file(file_list, widget):
"""Removes selected item in given list and given list widget.
"""
try:
selected_item = widget.selectedItems()[0]
file_list.remove(Path(selected_item.text()))
widget.takeItem(widget.row(selected_item))
except IndexError:
pass
@staticmethod
def get_page_count(file):
"""Returns the number of pages of the given pdf file.
"""
output = subprocess.check_output(['pdfinfo', file]).decode()
pages_line = [line for line in output.splitlines() if 'Pages:' in line][0]
page_count = int(pages_line.split(':')[1])
return page_count
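# Editor's note: get_page_count() shells out to poppler's `pdfinfo`, whose output
# contains a line like "Pages:          12"; the code above takes the text after
# the colon, so int('          12') == 12.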
class TabCompress(QtWidgets.QWidget):
"""Tab containing the elements for pdf compression.
"""
def __init__(self):
super().__init__()
self.horizontal_layout = QtWidgets.QHBoxLayout(self)
self.horizontal_layout.setContentsMargins(10, 10, 10, 10)
self.horizontal_layout.setSpacing(10)
self.file_dialog_input = QtWidgets.QFileDialog()
self.folder_dialog_output = QtWidgets.QFileDialog()
self.folder_dialog = QtWidgets.QFileDialog()
self.file_list = []
self.output_path = Path().home()
self.file_list_widget = QtWidgets.QListWidget()
self.file_list_widget.setMinimumWidth(450)
self.label_output_files = QtWidgets.QLabel(str(self.output_path))
self.line_edit_suffix = QtWidgets.QLineEdit('_2')
self.line_edit_suffix.setMaximumWidth(40)
self.line_edit_suffix.textChanged.connect(self.refresh_output_label)
self.make_layout_compress()
def make_layout_compress(self):
"""Create and arrange the layout for the compression elements.
"""
vertical_layout_compress = QtWidgets.QVBoxLayout()
self.horizontal_layout.addLayout(vertical_layout_compress)
label_list_widget = QtWidgets.QLabel('Pdf files to compress:')
push_button_load_files_input = QtWidgets.QPushButton()
push_button_load_files_input.setToolTip('Add pdf files')
push_button_load_files_input.setIcon(QIcon.fromTheme('list-add'))
push_button_load_files_input.clicked.connect(self.open_file_dialog_input)
push_button_load_folder_input = QtWidgets.QPushButton()
push_button_load_folder_input.setToolTip('Add all pdf files from a folder')
push_button_load_folder_input.setIcon(QIcon.fromTheme('folder-add'))
push_button_load_folder_input.clicked.connect(self.open_folder_dialog_input)
push_button_remove_file = QtWidgets.QPushButton()
push_button_remove_file.setIcon(QIcon.fromTheme('remove'))
push_button_remove_file.setToolTip('Remove selected item')
push_button_remove_file.clicked.connect(self.remove_file)
push_button_clear_list = QtWidgets.QPushButton()
push_button_clear_list.setIcon(QIcon.fromTheme('edit-clear-all'))
push_button_clear_list.setToolTip('Clear list')
push_button_clear_list.clicked.connect(self.clear_list)
label_suffix = QtWidgets.QLabel('Suffix for compressed output files: ')
push_button_choose_path_output = QtWidgets.QPushButton()
push_button_choose_path_output.setIcon(QIcon.fromTheme('folder-symbolic'))
push_button_choose_path_output.setToolTip('Change output path')
push_button_choose_path_output.clicked.connect(self.open_folder_dialog_output)
push_button_choose_path_output.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
push_button_choose_path_output.setMinimumHeight(1)
self.label_output_files.setAlignment(Qt.AlignTop)
label_output = QtWidgets.QLabel('Output files:')
label_output.setAlignment(Qt.AlignBottom)
push_button_start_compress = QtWidgets.QPushButton()
push_button_start_compress.setText('Start compression')
push_button_start_compress.setMinimumSize(QSize(110, 20))
push_button_start_compress.clicked.connect(self.start_compression)
vertical_layout_compress.addWidget(label_list_widget)
vertical_layout_buttons = QtWidgets.QVBoxLayout()
scroll_area = QtWidgets.QScrollArea()
scroll_area.setWidget(self.label_output_files)
        scroll_area.setWidgetResizable(True)
vertical_layout_buttons.addWidget(push_button_load_files_input)
vertical_layout_buttons.addWidget(push_button_load_folder_input)
vertical_layout_buttons.addWidget(push_button_remove_file)
vertical_layout_buttons.addWidget(push_button_clear_list)
vertical_layout_buttons.setSpacing(20)
horizontal_layout_file_list = QtWidgets.QHBoxLayout()
horizontal_layout_file_list.addWidget(self.file_list_widget)
horizontal_layout_file_list.addLayout(vertical_layout_buttons)
vertical_layout_compress.addLayout(horizontal_layout_file_list)
vertical_layout_compress.addSpacing(10)
vertical_layout_compress.addWidget(label_output)
horizontal_layout_output_files = QtWidgets.QHBoxLayout()
horizontal_layout_output_files.addWidget(scroll_area)
horizontal_layout_output_files.addWidget(push_button_choose_path_output, alignment=Qt.AlignTop)
horizontal_layout_output_files.setSpacing(10)
vertical_layout_compress.addLayout(horizontal_layout_output_files)
horizontal_layout_bottom = QtWidgets.QHBoxLayout()
horizontal_layout_bottom.addWidget(label_suffix)
horizontal_layout_bottom.addWidget(self.line_edit_suffix)
horizontal_layout_bottom.addWidget(push_button_start_compress)
vertical_layout_compress.addLayout(horizontal_layout_bottom)
def start_compression(self):
"""Start the compression process by calling self.run_gs(). Opens messagebox when finished.
"""
if self.check_if_output_is_valid_and_different_to_input(self.file_list, self.output_path):
for file in self.file_list:
TabCompress.run_gs(str(file), str(self.output_path / f'{file.stem}{self.line_edit_suffix.text()}.pdf'))
message_box = QtWidgets.QMessageBox(self)
message_box.setText('Compression finished!')
message_box.show()
@staticmethod
def run_gs(input_file, output_file):
"""Runs the tool ghostscript to compress the given pdf file. Takes strings for the input
and the output file as arguments.
"""
command = ('gs', '-sDEVICE=pdfwrite', '-dNOPAUSE', '-dBATCH', f'-sOutputFile={output_file}', input_file)
subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
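    # Editor's note: run_gs('in.pdf', 'out.pdf') above is equivalent to running
    #   gs -sDEVICE=pdfwrite -dNOPAUSE -dBATCH -sOutputFile=out.pdf in.pdf
    # i.e. ghostscript rewrites the PDF through its pdfwrite device, which
    # typically recompresses streams and shrinks the file.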
def check_if_output_is_valid_and_different_to_input(self, input_file_list, output_path):
"""Returns True if the given output path is valid and different to all paths in the given list of input files.
Returns False otherwise.
"""
if input_file_list:
if output_path.root and output_path.is_dir():
for file in input_file_list:
if file.parent == output_path and not self.line_edit_suffix.text():
message_box = QtWidgets.QMessageBox(self)
message_box.setText(
'Suffix field is empty! Output path should be different to '
'the path of your input files to avoid losing files!'
)
message_box.show()
return False
return True
else:
message_box = QtWidgets.QMessageBox(self)
message_box.setText('No valid output path selected!')
message_box.show()
return False
else:
message_box = QtWidgets.QMessageBox(self)
message_box.setText('No input files selected!')
message_box.show()
return False
def open_file_dialog_input(self):
"""Opens the file dialog to choose the input file(s). Writes its value(s) to self.file_list.
"""
self.file_dialog_input.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
self.file_dialog_input.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
file_list_temp = self.file_dialog_input.getOpenFileNames(
self, 'Select pdf files to compress!', '', 'Pdf files (*.pdf)'
)[0]
if file_list_temp:
for file in file_list_temp:
self.file_list.append(Path(file))
PdfTool.refresh_list_widget(self.file_list, self.file_list_widget)
self.refresh_output_label()
def open_folder_dialog_input(self):
"""Opens the folder dialog to choose the folder containing the input files.
Writes its value to self.file_list via the method PdfTool.get_all_files.
"""
self.folder_dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
folder = Path(self.folder_dialog.getExistingDirectory(self, 'Select folder!'))
if folder.root:
self.file_list += PdfTool.get_all_files(folder)
PdfTool.refresh_list_widget(self.file_list, self.file_list_widget)
self.refresh_output_label()
def open_folder_dialog_output(self):
"""Opens the folder dialog to choose the destination of the output files. Writes its value to self.output_path.
"""
path = self.folder_dialog_output.getExistingDirectory(self, 'Change output folder!')
if path:
self.output_path = Path(path)
self.refresh_output_label()
def refresh_output_label(self):
"""Refresh output label to selected output path.
"""
string_output_files = ''
if self.file_list:
for file in self.file_list:
string_output_files += str(self.output_path / f'{file.stem}{self.line_edit_suffix.text()}.pdf\n')
else:
string_output_files = str(self.output_path)
self.label_output_files.setText(string_output_files)
def remove_file(self):
"""Call PdfTool.remove_file to remove selected file from list and widget.
"""
PdfTool.remove_file(self.file_list, self.file_list_widget)
self.refresh_output_label()
def clear_list(self):
"""Clear self.file_list and the related list widget.
"""
self.file_list_widget.clear()
self.file_list = []
self.refresh_output_label()
class TabSplit(QtWidgets.QWidget):
"""Tab containing the elements for pdf splitting.
"""
def __init__(self):
super().__init__()
self.horizontal_layout = QtWidgets.QHBoxLayout(self)
self.horizontal_layout.setContentsMargins(10, 10, 10, 10)
self.horizontal_layout.setSpacing(20)
self.file_dialog_input = QtWidgets.QFileDialog()
self.folder_dialog_output = QtWidgets.QFileDialog()
self.file = ""
self.label_split_pattern = QtWidgets.QLabel('Pages to extract:')
self.label_file = QtWidgets.QLabel()
self.label_file.setText('Select a pdf file!')
self.label_file.setAlignment(Qt.AlignCenter)
self.label_file.setMargin(0)
self.output_filename_line_edit = QtWidgets.QLineEdit()
self.output_filename_line_edit.textChanged.connect(self.refresh_output_label)
self.label_output_path = QtWidgets.QLabel()
self.output_path = Path().home()
self.line_edit_split_pattern = QtWidgets.QLineEdit('1-2')
self.line_edit_split_pattern.setToolTip('Example: 1-2, 5, 6-9')
self.compress_radio_button = QtWidgets.QRadioButton()
self.compress_radio_button.setText('Compress output file')
self.compress_radio_button.setChecked(True)
self.make_layout_split()
def make_layout_split(self):
"""Create and arrange the layout for the pdf splitting elements.
"""
vertical_layout_split = QtWidgets.QVBoxLayout()
self.horizontal_layout.addLayout(vertical_layout_split)
push_button_load_files_input = QtWidgets.QPushButton()
push_button_load_files_input.setIcon(QIcon.fromTheme('list-add'))
push_button_load_files_input.setToolTip('Load pdf file')
push_button_load_files_input.clicked.connect(self.open_file_dialog_input)
push_button_start_splitting = QtWidgets.QPushButton('Start splitting')
push_button_start_splitting.setIcon(QIcon.fromTheme('split'))
push_button_start_splitting.clicked.connect(self.start_splitting)
push_button_choose_path_output = QtWidgets.QPushButton()
push_button_choose_path_output.setIcon(QIcon.fromTheme('folder-symbolic'))
push_button_choose_path_output.clicked.connect(self.open_folder_dialog_output)
label_filename = QtWidgets.QLabel('Name of the output file:')
horizontal_layout_input_file = QtWidgets.QHBoxLayout()
self.label_file.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
horizontal_layout_input_file.addWidget(self.label_file)
horizontal_layout_input_file.addWidget(push_button_load_files_input)
vertical_layout_split.addLayout(horizontal_layout_input_file)
self.label_split_pattern.setAlignment(Qt.AlignBottom)
vertical_layout_split.addWidget(self.label_split_pattern)
vertical_layout_split.addWidget(self.line_edit_split_pattern)
vertical_layout_split.addSpacing(30)
horizontal_layout_filename = QtWidgets.QHBoxLayout()
horizontal_layout_filename.addWidget(label_filename)
self.output_filename_line_edit.setText('output')
horizontal_layout_filename.addWidget(self.output_filename_line_edit)
vertical_layout_split.addLayout(horizontal_layout_filename)
horizontal_layout_output_file = QtWidgets.QHBoxLayout()
self.label_output_path.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
horizontal_layout_output_file.addWidget(self.label_output_path)
horizontal_layout_output_file.addWidget(push_button_choose_path_output)
vertical_layout_split.addLayout(horizontal_layout_output_file)
horizontal_layout_bottom = QtWidgets.QHBoxLayout()
horizontal_layout_bottom.addWidget(self.compress_radio_button)
horizontal_layout_bottom.addWidget(push_button_start_splitting)
vertical_layout_split.addLayout(horizontal_layout_bottom)
def open_file_dialog_input(self):
"""Opens the file dialog to choose the input file. Writes its value to self.file.
"""
self.file_dialog_input.setFileMode(QtWidgets.QFileDialog.ExistingFile)
self.file_dialog_input.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
self.file = self.file_dialog_input.getOpenFileName(
self, 'Select pdf file to split!', '', 'Pdf files (*.pdf)'
)[0]
if self.file:
self.label_file.setText(f'Selected pdf file: {self.file}')
self.label_split_pattern.setText(
f'Pages to extract: (Input file has {PdfTool.get_page_count(self.file)} pages)'
)
def open_folder_dialog_output(self):
"""Opens the folder dialog to choose the destination of the output files. Writes its value to self.output_path.
"""
path = self.folder_dialog_output.getExistingDirectory(self, 'Select output folder!')
if path:
self.label_output_path.setText(f'Output File: {path}/{self.output_filename_line_edit.text()}.pdf')
self.output_path = Path(path)
def refresh_output_label(self):
"""Refresh output label to selected output path.
"""
file_name = self.output_filename_line_edit.text()
if file_name[-4:] == '.pdf':
file_name = file_name[:-4]
self.label_output_path.setText(f'Output File: {self.output_path}/{file_name}.pdf')
def start_splitting(self):
"""Starts splitting process. Informs when finished or the split pattern has a wrong format.
"""
list_start_stop = TabSplit.analyze_split_pattern(self.line_edit_split_pattern.text())
list_indices = []
output_file = f'{self.output_path}/{self.output_filename_line_edit.text()}.pdf'
if self.file:
if list_start_stop:
for item in list_start_stop:
split_succeeded = self.split_pdf(*item, self.file, output_file)
if not split_succeeded:
return
list_indices += [n for n in range(int(item[0]), int(item[1]) + 1)]
command = ['pdfunite']
for index in list_indices:
command.append(output_file + str(index))
command.append(output_file)
subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for index in list_indices:
Path(output_file + str(index)).unlink()
if self.compress_radio_button.isChecked():
TabCompress.run_gs(output_file, output_file + '_')
Path(output_file + '_').rename(Path(output_file))
message_box = QtWidgets.QMessageBox(self)
message_box.setText('Splitting finished!')
message_box.show()
else:
message_box = QtWidgets.QMessageBox(self)
message_box.setText('Wrong split format! Example: 1, 2, 4-6, 8-9')
message_box.show()
else:
message_box = QtWidgets.QMessageBox(self)
message_box.setText('No Input file selected!')
message_box.show()
@staticmethod
def analyze_split_pattern(string_split_pattern):
"""Takes the split pattern string input as argument. Returns a list with of the
list [start-page, stop-page] for each element seperated by ',' of the input string.
Returns False if the elements are not in the right format: int, or int-int.
"""
list_old = string_split_pattern.replace(' ', '').split(',')
list_new = []
r = re.compile('[0-9][0-9]*-[0-9][0-9]*')
for item in list_old:
if r.match(item) is not None:
list_new.append(item.split('-'))
elif item.isnumeric():
list_new.append([item, item])
else:
return False
return list_new
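# Worked example (illustrative only): for the input '1, 2, 4-6' this method
# returns [['1', '1'], ['2', '2'], ['4', '6']], i.e. one [start-page, stop-page]
# pair per comma-separated element, while an element such as '1-x' makes it
# return False.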
def split_pdf(self, start, stop, input_file, output_file):
"""Start single splitting process with tool pdfseperate.
Takes start page, stop page, input file and output file in string format as arguments.
Returns True if successful, False otherwise.
"""
command = ['pdfseparate', '-f', start, '-l', stop, input_file, f'{output_file}%d']
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
list_log_split = proc.stdout.readlines()
try:
log_split = list_log_split[0]
except IndexError:
log_split = b''
if b'Illegal pageNo' in log_split:
page_string = log_split.strip()[-4:].decode()
message_box = QtWidgets.QMessageBox(self)
message_box.setText(
f'Page {page_string[0]} doesn\'t exist. The pdf file only contains {page_string[2]} pages.'
)
message_box.show()
return False
return True
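# Sketch of the underlying call (paths are hypothetical placeholders):
# split_pdf('2', '5', '/tmp/in.pdf', '/tmp/out.pdf') runs
#
#     pdfseparate -f 2 -l 5 /tmp/in.pdf /tmp/out.pdf%d
#
# which writes the single-page files /tmp/out.pdf2 ... /tmp/out.pdf5 that
# start_splitting() afterwards joins again with pdfunite.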
class TabMerge(QtWidgets.QWidget):
"""Tab containing the elements for pdf merging.
"""
def __init__(self):
super().__init__()
self.horizontal_layout = QtWidgets.QHBoxLayout(self)
self.horizontal_layout.setContentsMargins(10, 10, 10, 10)
self.horizontal_layout.setSpacing(10)
self.file_dialog_input = QtWidgets.QFileDialog()
self.folder_dialog_output = QtWidgets.QFileDialog()
self.folder_dialog = QtWidgets.QFileDialog()
self.file_list = []
self.output_filename_line_edit = QtWidgets.QLineEdit()
self.output_filename_line_edit.textChanged.connect(self.refresh_output_label)
self.label_output_path = QtWidgets.QLabel()
self.output_path = Path().home()
self.file_list_widget = QtWidgets.QListWidget()
self.compress_radio_button = QtWidgets.QRadioButton()
self.compress_radio_button.setText('Compress output file')
self.compress_radio_button.setChecked(True)
self.make_layout_merge()
def make_layout_merge(self):
"""Create and arrange the layout for the pdf merging elements.
"""
vertical_layout_merge = QtWidgets.QVBoxLayout()
self.horizontal_layout.addLayout(vertical_layout_merge)
label_list_widget = QtWidgets.QLabel('Pdf files to merge:')
push_button_up = QtWidgets.QPushButton()
push_button_up.setIcon(QIcon.fromTheme('go-up'))
push_button_up.setToolTip('Move selected item up')
push_button_up.clicked.connect(self.move_selected_item_up)
push_button_load_files_input = QtWidgets.QPushButton()
push_button_load_files_input.setToolTip('Add pdf files')
push_button_load_files_input.setIcon(QIcon.fromTheme('list-add'))
push_button_load_files_input.clicked.connect(self.open_file_dialog_input)
push_button_load_folder_input = QtWidgets.QPushButton()
push_button_load_folder_input.setToolTip('Add all pdf files from a folder')
push_button_load_folder_input.setIcon(QIcon.fromTheme('folder-add'))
push_button_load_folder_input.clicked.connect(self.open_folder_dialog_input)
push_button_remove_file = QtWidgets.QPushButton()
push_button_remove_file.setIcon(QIcon.fromTheme('list-remove'))
push_button_remove_file.setToolTip('Remove selected item')
push_button_remove_file.clicked.connect(self.remove_file)
push_button_clear_list = QtWidgets.QPushButton()
push_button_clear_list.setIcon(QIcon.fromTheme('edit-clear-all'))
push_button_clear_list.setToolTip('Clear list')
push_button_clear_list.clicked.connect(self.clear_list)
push_button_down = QtWidgets.QPushButton()
push_button_down.setIcon(QIcon.fromTheme('go-down'))
push_button_down.setToolTip('Move selected item down')
push_button_down.clicked.connect(self.move_selected_item_down)
label_filename = QtWidgets.QLabel('Name of the output file:')
push_button_choose_path_output = QtWidgets.QPushButton()
push_button_choose_path_output.setIcon(QIcon.fromTheme('folder-symbolic'))
push_button_choose_path_output.clicked.connect(self.open_folder_dialog_output)
push_button_start_merge = QtWidgets.QPushButton()
push_button_start_merge.setText('Start merging')
push_button_start_merge.setIcon(QIcon.fromTheme('merge'))
push_button_start_merge.clicked.connect(self.start_merge)
vertical_layout_merge.addWidget(label_list_widget)
vertical_layout_buttons = QtWidgets.QVBoxLayout()
vertical_layout_buttons.addWidget(push_button_up)
vertical_layout_buttons.addWidget(push_button_load_files_input)
vertical_layout_buttons.addWidget(push_button_load_folder_input)
vertical_layout_buttons.addWidget(push_button_remove_file)
vertical_layout_buttons.addWidget(push_button_clear_list)
vertical_layout_buttons.addWidget(push_button_down)
horizontal_layout_file_list = QtWidgets.QHBoxLayout()
horizontal_layout_file_list.addWidget(self.file_list_widget)
horizontal_layout_file_list.addLayout(vertical_layout_buttons)
vertical_layout_merge.addLayout(horizontal_layout_file_list)
horizontal_layout_filename = QtWidgets.QHBoxLayout()
horizontal_layout_filename.addWidget(label_filename)
self.output_filename_line_edit.setText('output')
horizontal_layout_filename.addWidget(self.output_filename_line_edit)
vertical_layout_merge.addLayout(horizontal_layout_filename)
horizontal_layout_output_file = QtWidgets.QHBoxLayout()
self.label_output_path.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
horizontal_layout_output_file.addWidget(self.label_output_path)
horizontal_layout_output_file.addWidget(push_button_choose_path_output)
vertical_layout_merge.addLayout(horizontal_layout_output_file)
horizontal_layout_bottom = QtWidgets.QHBoxLayout()
horizontal_layout_bottom.addWidget(self.compress_radio_button)
horizontal_layout_bottom.addWidget(push_button_start_merge)
vertical_layout_merge.addLayout(horizontal_layout_bottom)
def refresh_output_label(self):
"""Refresh output label to selected output path.
"""
file_name = self.output_filename_line_edit.text()
if file_name[-4:] == '.pdf':
file_name = file_name[:-4]
self.label_output_path.setText(f'Output File: {self.output_path}/{file_name}.pdf')
def move_selected_item_up(self):
"""Moves the position of the selected item in the list widget and related list up.
"""
if self.file_list:
current_row = self.file_list_widget.currentRow()
current_item = self.file_list_widget.takeItem(current_row)
self.file_list.insert(current_row - 1, self.file_list.pop(current_row))
self.file_list_widget.insertItem(current_row - 1, current_item)
self.file_list_widget.setCurrentRow(current_row - 1)
def move_selected_item_down(self):
"""Moves the position of the selected item in the list widget and related list down.
"""
if self.file_list:
current_row = self.file_list_widget.currentRow()
current_item = self.file_list_widget.takeItem(current_row)
self.file_list.insert(current_row + 1, self.file_list.pop(current_row))
self.file_list_widget.insertItem(current_row + 1, current_item)
self.file_list_widget.setCurrentRow(current_row + 1)
def open_file_dialog_input(self):
"""Opens the file dialog to choose the input file(s). Writes its value(s) to self.file_list.
"""
self.file_dialog_input.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
self.file_dialog_input.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
file_list_temp = self.file_dialog_input.getOpenFileNames(
self, 'Select pdf files to merge!', '', 'Pdf files (*.pdf)'
)[0]
if file_list_temp:
for file in file_list_temp:
self.file_list.append(Path(file))
PdfTool.refresh_list_widget(self.file_list, self.file_list_widget)
def open_folder_dialog_output(self):
"""Opens the folder dialog to choose the destination of the output file. Writes its value to self.output_path.
"""
path = self.folder_dialog_output.getExistingDirectory(self, 'Select output folder!')
if path:
self.label_output_path.setText(f'Output File: {path}/{self.output_filename_line_edit.text()}.pdf')
self.output_path = Path(path)
def open_folder_dialog_input(self):
"""Opens the folder dialog to choose the folder containing the input files.
Writes its value to self.file_list via the method PdfTool.get_all_files.
"""
self.folder_dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
folder = Path(self.folder_dialog.getExistingDirectory(self, 'Select folder!'))
if folder.root:
self.file_list += PdfTool.get_all_files(folder)
PdfTool.refresh_list_widget(self.file_list, self.file_list_widget)
def remove_file(self):
"""Call PdfTool.remove_file to remove selected file from list and widget.
"""
PdfTool.remove_file(self.file_list, self.file_list_widget)
def clear_list(self):
"""Clear self.file_list and the related list widget.
"""
self.file_list_widget.clear()
self.file_list = []
def start_merge(self):
"""Start merging process with the tool pdfunite. Informs when finished or no input or output file is given.
"""
message_box = QtWidgets.QMessageBox(self)
if self.output_filename_line_edit.text():
if self.file_list:
output_file = str(self.output_path / self.output_filename_line_edit.text())
if output_file[-4:] == '.pdf':
output_file = output_file[:-4]
command = ['pdfunite'] + self.file_list + [output_file + '.pdf']
subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if self.compress_radio_button.isChecked():
TabCompress.run_gs(output_file + '.pdf', output_file + '_.pdf')
Path(output_file + '_.pdf').rename(Path(output_file + '.pdf'))
message_box.setText('Merging finished!')
message_box.show()
else:
message_box.setText('No pdf files selected!')
message_box.show()
else:
message_box.setText('Choose a file name!')
message_box.show()
def main():
app = QtWidgets.QApplication(sys.argv)
main.pdf_tool = PdfTool()
main.pdf_tool.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 48.046083
| 119
| 0.681949
|
fb642fb6e19317066d397d79e3781b54cc7b7abd
| 15,092
|
py
|
Python
|
council_bot.py
|
DT-1236/council_bot_legacy
|
633ffb078a1d1092553315fd9eb24e25cb4f2724
|
[
"MIT"
] | 1
|
2017-04-21T08:28:28.000Z
|
2017-04-21T08:28:28.000Z
|
council_bot.py
|
DT-1236/council_bot
|
633ffb078a1d1092553315fd9eb24e25cb4f2724
|
[
"MIT"
] | null | null | null |
council_bot.py
|
DT-1236/council_bot
|
633ffb078a1d1092553315fd9eb24e25cb4f2724
|
[
"MIT"
] | 1
|
2020-02-22T02:32:36.000Z
|
2020-02-22T02:32:36.000Z
|
import datetime
import urllib
import re
import discord
from discord.ext import commands
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import logging
import Levenshtein
import member_info
logger = logging.getLogger('discord')
logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
calendar = {'01':' Jan ', '02':' Feb ', '03':' Mar ', '04':' Apr ', '05':' May ', '06':' Jun ', '07':' Jul ', '08':' Aug ', '09':' Sep ', '10':' Oct ', '11':' Nov ', '12':' Dec '}
class Poll:
polls={}#Dictionary of polls. Keys are strings which contain poll.name. Values are the Poll objects themselves which contain a dictionary of voter information
def __init__(self, ctx, name):
self.name = name
self.deletion = False
Poll.polls[self.name]=self
self.votes = {}
present = [x for x in ctx.channel.members if not x.bot and x.status is not discord.Status.offline]
for x in present:
self.votes[x.name]='No vote recorded' #Keys are strings containing names of present members
@staticmethod
def all_polls():
return list(Poll.polls.keys())  # Poll.polls is keyed by poll name, so the keys are the names
def results(self):
tally = zip(Poll.polls[self.name].votes.keys(),Poll.polls[self.name].votes.values())
return ("Current Results for Poll:%s \n"%(Poll.polls[self.name].name)+"```\n"+"%s\n"*len((Poll.polls[self.name].votes))%tuple([x for x in tally])+"```")
class Secret(Poll):
def results(self):
tally = (Poll.polls[self.name].votes.values())
return ("Current Results for secret poll, %s: \n"%(Poll.polls[self.name].name)+"```\n"+"%s\n"*len((Poll.polls[self.name].votes))%tuple([x for x in tally])+"```")
def lined_string(text):
return "```\n"+"%s\n"*len(text)%tuple(text)+"```\n"
bot = commands.Bot(command_prefix='&')
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
@bot.command(aliases=['LastLogin', 'Lastlogin','Last','last','lastLogin', 'login', 'Login'])
async def lastlogin(ctx,*,request : str=''):
"""Return last login date for a given user name. May have trouble if the search returns multiple results"""
await ctx.send(member_info.last_login(request))
@bot.command(aliases=['Allegiance'])
async def allegiance(ctx,*,request : str=''):
"""Return the alliance to which the requested player currently belongs. May have trouble if the search returns multiple results"""
await ctx.send(member_info.allegiance(request))
@bot.command(aliases=['Cups', 'cups', 'Cup', 'cup', 'Trophy', 'trophy', 'Trophies'])
async def trophies(ctx,*,request : str=''):
"""Return current trophies. May have trouble if the search returns multiple results"""
await ctx.send(member_info.trophies(request))
@bot.command(aliases=['Refresh','renew','Renew'])
async def refresh(ctx,*,request : str=''):
"""Refresh data for the member. May have trouble if the search returns multiple results"""
await ctx.send(member_info.refresh(request))
@bot.command(aliases=['Polls','Poll','poll'])
async def polls(ctx,*,request : str=''):
"""(): Returns a list of all active polls\n"""
phrase = "Active polls:\n"+"```\n"+"%s\n"*len(Poll.polls)%(tuple([x for x in Poll.polls]))+"```"
await ctx.send(phrase)
return
@bot.command(pass_context=True,aliases=['Newpoll'])
async def newpoll(ctx,*,request : str=''):
"""(poll): Creates new (poll) with all online members in channel"""
if request and request not in Poll.polls:
request = Poll(ctx,str(request))
phrase = ("New poll created: %s \nRegistered voters:\n"%(request.name)+"```\n"+"%s\n"*len(set(request.votes))%(tuple(set(request.votes)))+"```")
await ctx.send(phrase)
return
elif request:
await ctx.send("%s is already an active poll. Remove it before making it again"%request)
else:
await ctx.send("I need a name for this poll")
return
@bot.command(aliases=['Newsecret'])
async def newsecret(ctx,*,request : str=''):
"""(secret poll): Creates a new (secret poll) with all online members in channel"""
if request and request not in Poll.polls:
request = Secret(ctx,str(request))
phrase = ("New secret poll created: %s \nRegistered voters:\n"%(request.name)+"```\n"+"%s\n"*len(set(request.votes))%(tuple(set(request.votes)))+"```")
await ctx.send(phrase)
return
elif request:
await ctx.send("%s is already an active poll. Remove it before making it again"%request)
else:
await ctx.send("I need a name for this secret poll")
return
@bot.command(aliases=['Remove','delete','del','Delete','Del','erase','Erase'])
async def remove(ctx,*,request : str=''):
"""(poll): Deletes (poll). Requires the command to be repeated"""
writer = ctx.message.author.name
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
if Poll.polls[poll].deletion==True:
del Poll.polls[poll]
await ctx.send("%s has been removed by %s"%(poll,writer))
print ("%s has removed poll: %s"%(writer,poll))
return
else:
Poll.polls[poll].deletion=True
await ctx.send("%s has been marked for removal. Repeat the remove command to finalize deletion of the poll.\n Otherwise, use the cancel command to reverse this action.\n Use the silence command to remove individual voters from a poll"%poll)
return
@bot.command()
async def cancel(ctx,*,request : str=''):
"""(poll): Cancels the delete action on (poll)"""
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
Poll.polls[poll].deletion=False
await ctx.send("Deletion order for %s has been cancelled"%poll)
@bot.command()
async def add(ctx,*,request : str=''):
"""(poll),(member): Adds another (member) to (poll)"""
text = request.split(',',2)
if len(text) < 2:
await ctx.send("Syntax ```\n(poll),(member)```\nMember likely has to be online to be successfully added")
return
if text[1][0]==' ':
text[1]=text[1][1:]
member_check = process.extractOne("%s"%(text[1]),bot.get_all_members())
if member_check[1] > 70:
member = member_check[0]
else:
await ctx.send("I'm not sure %s is here right now. Try again when they're online"%member)
return
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
Poll.polls[poll].votes[member.name] = 'No vote recorded'
await ctx.send("%s has been added to %s"%(member, poll))
phrase = Poll.polls[poll].results()
await ctx.send(phrase)
return
@bot.command(pass_context=True)
async def silence(ctx,*,request : str=''):
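"""(poll),(member): Removes (member) from (poll)"""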
writer = ctx.message.author.name
text = request.split(',',2)
if len(text) < 2:
await ctx.send("Syntax ```\n(poll),(member)```")
return
if text[1][0]==' ':
text[1]=text[1][1:]
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
if process.extractOne("%s"%(text[1]),Poll.polls[poll].votes.keys())[1] > 70:
member = (process.extractOne("%s"%(text[1]),Poll.polls[poll].votes.keys())[0])
del Poll.polls[poll].votes[member]
await ctx.send("%s has been removed from %s by %s"%(member, poll, writer))
print ("%s has removed %s from %s"%(writer, member, poll))
else:
await ctx.send("I don't think %s is part of %s"%(text[1], poll))
return
@bot.command(pass_context=True)
async def vote(ctx,*,request : str=''):
"""(poll),(vote): Records your (vote) for (poll)"""
voter = ctx.message.author.name
text = request.split(',',2)
if text[1][0]==' ':
text[1]=text[1][1:]
poll = process.extractOne("%s"%(text[0]),Poll.polls.keys())[0] #Gives a string of the poll which is the key to access the Poll object. process returns a tuple with the result in [0] and the match accuracy in [1]
decision = text[1]
Poll.polls[poll].votes[voter]=decision #Class Poll, dictionary of all polls, specific poll, dictionary of voters/votes in poll, specific voter value changed to decision
phrase = Poll.polls[poll].results()
await ctx.send(phrase)
return
@bot.command(aliases=['voter','Voters','Voter'])
async def voters(ctx,*,request : str=''):  #keyword-only rest argument so poll names with spaces are captured
"""(poll): Returns a list of recognized voters for (poll)"""
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
phrase = "Registered voters for %s:\n"%(poll)+"```\n"+"%s\n"*len(set(Poll.polls[poll].votes))%(tuple(set(Poll.polls[poll].votes)))+"```"
await ctx.send(phrase)
return
@bot.command(aliases=['result','Result','Results'])
async def results(ctx,*,request : str=''):
"""(poll): Returns current results for (poll). Secret polls will not have names attached to votes"""
poll = process.extractOne("%s"%(request),Poll.polls.keys())[0]
phrase = Poll.polls[poll].results()
await ctx.send(phrase)
return
@bot.command(aliases=['Command','Commands','Commandlist'])
async def commandlist(ctx):
"""returns commands with acceptable syntax"""
phrase = """```\nnewpoll - (poll): Creates new (poll) with all online members in channel\n
newsecret - (secret poll): Creates a new (secret poll) with all online members in channel\n
results - (poll): Returns current results for (poll). Secret polls will not have names attached to votes\n
remove - (poll): Deletes (poll). Requires the command to be repeated\n
cancel - (poll): Cancels the delete action on (poll)\n
polls - (): Returns a list of all active polls\n
voters - (poll): Returns a list of recognized voters for (poll)\n
vote - (poll),(vote): Records your (vote) for (poll)\n
add - (poll),(member): Adds another (member) to (poll)\n
silence - (poll),(member): Removes (member) from (poll)\n
```
"""
await ctx.send(phrase)
return
@bot.command(aliases=['Complete'])
async def complete(ctx,*,request : str=''):
"""Returns complete trophy data over time for a player"""
results = member_info.complete(request)
member_info.os.chdir('plots')
result = [x for x in zip(results[0],results[1])]
await ctx.send("Complete trophy data. Name and IDs:"+lined_string(result), file=discord.File(fp="plot.png"))
member_info.os.chdir('..')
return
@bot.command(aliases=['Alliance', 'Alliances', 'alliances'])
async def alliance(ctx,*,request : str=''):
"""Returns trophy data over time for an alliance"""
results = member_info.alliance(request)
member_info.os.chdir('plots')
result = [x for x in zip(results[0],results[1])]
await ctx.send("Alliance trophy data over time. Alliance names and IDs:"+lined_string(result), file=discord.File(fp="plot.png"))
member_info.os.chdir('..')
return
@bot.command(aliases=['Average', 'Averages', 'averages', 'AVG', 'avg'])
async def average(ctx,*,request : str=''):
"""Returns average member trophy data over time for an alliance"""
results = member_info.average(request)
member_info.os.chdir('plots')
result = [x for x in zip(results[0],results[1])]
await ctx.send("Average member trophy data over time. Alliance names and IDs:"+lined_string(result), file=discord.File(fp="plot.png"))
member_info.os.chdir('..')
return
@bot.command(aliases=['History', 'hist', 'Hist'])
async def history(ctx,*,request : str=''):
"""Returns the alliance history for a player"""
results = member_info.history(request)
await ctx.send("Alliance history for Player: %s, MemberID: %s is as follows:\n"%(member_info.memberIDs[int(results[1])], results[1])+lined_string(results[0]))
return
@bot.command(aliases=['Look', 'look', 'Lookup'])
async def lookup(ctx,*,request : str=''):
"""Returns ID numbers for an alliance or a member. Separate alliance or member with a comma before giving the name of an alliance or a member"""
request = request.split(',', 2)
if request[0] == 'alliance':
await ctx.send(lined_string(member_info.alliance_lookup(request[1])))
return
if request[0] == 'member' or request[0] == 'user':
await ctx.send(lined_string(member_info.member_lookup(request[1])))
return
@bot.command(aliases=['Token'])
async def token(ctx,*,request : str=''):
"""Refreshes the token. The full url is valid"""
results = member_info.token_refresh()
member_info.token,member_info.server = results[0],results[1]
await ctx.send("Token set to %s"%results[0])
return
@bot.command(aliases=['Data', 'data', 'Database'])
async def database(ctx,*,request : str=''):
"""Collects Trophy data for all members in all top 100 alliances"""
try:
await ctx.send("Attemping database function. Council bot functions will be unavailable for approximately 2-5 minutes.")
member_info.database()
await ctx.send("Database operation complete. Contact DT-1236 to ensure import into SQL server.")
except:
await ctx.send("Database operation unsuccessful. Token is likely invalid. Update with &token")
@bot.command(aliases=['Inactives', 'inactives', 'Inactive'])
async def inactive(ctx,*,request : int=''):
"""Posts a .txt file containing a list of all members and their last login per ShipService"""
try:
await ctx.send("Operation attempted. Bot function will be unavailable for approximately 2-5 minutes")
member_info.inactives(request)
member_info.os.chdir('lists')
await ctx.send("Last Login data for %s is in this .txt"%member_info.allianceIDs[request], file=discord.File(fp='%s - %s Inactives.txt'%(str(datetime.date.today()),member_info.allianceIDs[request])))
member_info.os.chdir('..')
return
except:
await ctx.send("Something wrong happened. This function only works with Alliance IDs. Find some with ```&lookup alliance, [alliance name]``` Alternatively, the token could be wrong. Reset it with ```&token [string]```")
@bot.command(aliases=['Recipient', 'Receive', 'receive'])
async def recipient(ctx,*,request : str=''):
"""Returns the recipients for donated crew"""
owner = member_info.member_lookup(request)[0]
request = owner[1]
try:
await ctx.send("Operation attempted. Searching for crew donated by: %s. Functions will be unavailable for approximately 1-5 minutes"%owner[0])
results = member_info.recipient(request)
await ctx.send("Crew given by %s: %s were received by"%(owner[0],request)+lined_string(results))
return
except:
await ctx.send("Operation failed. Try a token refresh with &token or confirming ID with &lookup member")
return
bot.run('Mjc3MTkxNjczOTk2NTA5MTg0.C3aKYA.UF2sH6PrBdOxT6znHJAd66_k07Q') #Council bot's token
| 47.015576
| 249
| 0.647893
|
fe8510a81c5105c4f794f82600e27a5478c8e2a3
| 4,672
|
py
|
Python
|
dmlab/env.py
|
do-not-be-hasty/seed_rl
|
1e94de42dd7f40c6981a5099fb1acdc395d6b147
|
[
"Apache-2.0"
] | 2
|
2021-11-23T17:50:59.000Z
|
2022-01-13T12:10:00.000Z
|
dmlab/env.py
|
awarelab/seed_rl
|
b738be03e4d3c49ca259fae88d26cb747b771a65
|
[
"Apache-2.0"
] | 3
|
2020-11-12T03:32:54.000Z
|
2020-11-14T14:31:31.000Z
|
dmlab/env.py
|
awarelab/seed_rl
|
b738be03e4d3c49ca259fae88d26cb747b771a65
|
[
"Apache-2.0"
] | 2
|
2020-10-25T03:21:48.000Z
|
2020-12-28T06:00:04.000Z
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Lab Gym wrapper."""
import hashlib
import os
from absl import flags
from absl import logging
import gym
import numpy as np
from seed_rl.common import common_flags
from seed_rl.dmlab import games
import tensorflow as tf
import deepmind_lab
FLAGS = flags.FLAGS
flags.DEFINE_string('homepath', '', 'Labyrinth homepath.')
flags.DEFINE_string(
'dataset_path', '', 'Path to dataset needed for psychlab_*, see '
'https://github.com/deepmind/lab/tree/master/data/brady_konkle_oliva2008')
flags.DEFINE_string('game', 'explore_goal_locations_small', 'Game/level name.')
flags.DEFINE_integer('width', 96, 'Width of observation.')
flags.DEFINE_integer('height', 72, 'Height of observation.')
flags.DEFINE_string('level_cache_dir', None, 'Global level cache directory.')
DEFAULT_ACTION_SET = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
(-20, 0, 0, 1, 0, 0, 0), # Look Left + Forward
(20, 0, 0, 1, 0, 0, 0), # Look Right + Forward
(0, 0, 0, 0, 1, 0, 0), # Fire.
)
class LevelCache(object):
"""Level cache."""
def __init__(self, cache_dir):
self._cache_dir = cache_dir
def get_path(self, key):
key = hashlib.md5(key.encode('utf-8')).hexdigest()
dir_, filename = key[:3], key[3:]
return os.path.join(self._cache_dir, dir_, filename)
def fetch(self, key, pk3_path):
path = self.get_path(key)
try:
tf.io.gfile.copy(path, pk3_path, overwrite=True)
return True
except tf.errors.OpError:
return False
def write(self, key, pk3_path):
path = self.get_path(key)
if not tf.io.gfile.exists(path):
tf.io.gfile.makedirs(os.path.dirname(path))
tf.io.gfile.copy(pk3_path, path)
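# Illustrative note (not part of the original file): keys are spread over
# sub-directories by their MD5 hex digest, so, for a hypothetical cache directory,
#
#     cache = LevelCache('/tmp/level_cache')
#     cache.get_path('some-level-key')   # -> '/tmp/level_cache/<3 hex chars>/<29 hex chars>'
#
# fetch() copies a cached .pk3 into place and returns False on a cache miss
# instead of raising.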
class DmLab(gym.Env):
"""DeepMind Lab wrapper."""
def __init__(self, game, num_action_repeats, seed, is_test, config,
action_set=DEFAULT_ACTION_SET, level_cache_dir=None):
if is_test:
config['allowHoldOutLevels'] = 'true'
# Mixer seed for evaluation, see
# https://github.com/deepmind/lab/blob/master/docs/users/python_api.md
config['mixerSeed'] = 0x600D5EED
if game in games.ALL_GAMES:
game = 'contributed/dmlab30/' + game
config['datasetPath'] = FLAGS.dataset_path
self._num_action_repeats = num_action_repeats
self._random_state = np.random.RandomState(seed=seed)
if FLAGS.homepath:
deepmind_lab.set_runfiles_path(FLAGS.homepath)
self._env = deepmind_lab.Lab(
level=game,
observations=['RGB_INTERLEAVED'],
level_cache=LevelCache(level_cache_dir) if level_cache_dir else None,
config={k: str(v) for k, v in config.items()},
)
self._action_set = action_set
self.action_space = gym.spaces.Discrete(len(self._action_set))
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(config['height'], config['width'], 3),
dtype=np.uint8)
def _observation(self):
return self._env.observations()['RGB_INTERLEAVED']
def reset(self):
self._env.reset(seed=self._random_state.randint(0, 2 ** 31 - 1))
return self._observation()
def step(self, action):
raw_action = np.array(self._action_set[action], np.intc)
reward = self._env.step(raw_action, num_steps=self._num_action_repeats)
done = not self._env.is_running()
observation = None if done else self._observation()
return observation, reward, done, {}
def close(self):
self._env.close()
def create_environment(task):
logging.info('Creating environment: %s', FLAGS.game)
return DmLab(FLAGS.game,
FLAGS.num_action_repeats,
seed=task + 1,
is_test=False,
level_cache_dir=FLAGS.level_cache_dir,
config={
'width': FLAGS.width,
'height': FLAGS.height,
'logLevel': 'WARN',
})
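# A minimal usage sketch (assumes a working deepmind_lab installation and that
# the absl flags above have already been parsed):
#
#     env = create_environment(task=0)
#     obs = env.reset()                                  # uint8 RGB array, shape (height, width, 3)
#     obs, reward, done, info = env.step(env.action_space.sample())
#     env.close()
#
# Actions are indices into DEFAULT_ACTION_SET; each step repeats the chosen raw
# action FLAGS.num_action_repeats times inside the lab engine.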
| 31.782313
| 79
| 0.658604
|
c34b5f2387e700c991c23890b520f897b1b2c0c3
| 59,916
|
py
|
Python
|
KMC_allinone.py
|
laurisikk/KMC_GUI
|
77bdd186c06537447e5eb41c21d5f95a11cf8c2e
|
[
"MIT"
] | null | null | null |
KMC_allinone.py
|
laurisikk/KMC_GUI
|
77bdd186c06537447e5eb41c21d5f95a11cf8c2e
|
[
"MIT"
] | null | null | null |
KMC_allinone.py
|
laurisikk/KMC_GUI
|
77bdd186c06537447e5eb41c21d5f95a11cf8c2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import math
import sys
import os
import numpy as np
import KMC_test8 as KMC_engine
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QAction, QVBoxLayout, QGraphicsView, QToolBar, QGraphicsScene, QButtonGroup, QHBoxLayout, QGraphicsRectItem, QGraphicsItem, QGraphicsItemGroup, QMenu, QLabel, QDialog, QLineEdit, QMessageBox, QFileDialog, QListView
from PyQt5.QtGui import QIcon, QPixmap, QPolygon, QColor, QPainter, QPen, QBrush, QTransform, QFont, QFontMetrics, QPolygonF, QPainterPath, QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, QPointF, QRectF, QLine, QVariant
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
### Global variables and options
TOOLS= ['species', 'reactAtoB', 'reactABtoC', 'reactAtoBC', 'reactABtoCD', 'reactAtoBCD','reactABtoCDE']
Na=6.02*10**23
nodeFont=QFont("times",16)
speciesFillColor=QColor(79,126,151)
reactionsFillColor=QColor(46,144,66)
plugSideLength=30
plugWidth=plugSideLength*(math.sqrt(3)/2)
lineInteractionRange=20
speciesCounter=1
speciesList=[]
reactionsCounter=1
reactionsList=[]
connectionsCounter=1
connectionsList=[]
isMoving=False
movingItem=None
isConnecting=False
connectionStart=None
connectionEnd=None
KMCParams=[100000,1,0.01,1,1]#total N of starting particles,repeats,store timestep,max time,volume
fileName=None
lasttVector=[]
lastPVector=[]
lastCVector=[]
######################
### global Methods ###
######################
def calcPopulationVector(totalNParticles):
global speciesList
global KMCParams
Na=6.02*10**23
concList=[]
sumConc=0
if len(speciesList)!=0:
for species in speciesList:
concList.append(species.nodeBox.number)
sumConc+=species.nodeBox.number
if sumConc != 0:  # avoid division by zero when there are no species or the total concentration is zero
V=totalNParticles/(sumConc*Na)
populationList=np.array([])
for concentration in concList:
populationList=np.append(populationList,int(V*concentration*Na))
if sumConc != 0:
KMCParams[4]=V
return populationList
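# Worked example (illustrative numbers): with two species at 1.0 and 3.0 mol/L
# and totalNParticles=100000, sumConc=4.0, so
#     V = 100000 / (4.0 * 6.02e23) ~ 4.15e-20 L
# and the returned populations are roughly [25000, 75000] particles, i.e. the
# total particle budget is split in proportion to the concentrations. V is also
# stored in KMCParams[4] for later use by the KMC engine.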
#edit KMC parameters
def editKMC():
global speciesList
global KMCParams
getKMCParams=editKMCParams()
#generate output file stream
def generateOutputStream():
global speciesList
global reactionsList
global connectionsList
global KMCParams
outputStream=[]
#species list
for species in speciesList:
outputLine='//species '+str(species.pos().x())+" "+str(species.pos().y())+" "+str(species.nodeBox.name)+" "+str(species.nodeBox.number)
outputStream.append(outputLine)
#reactions list
for reactions in reactionsList:
outputLine='//reactions '+str(reactions.pos().x())+" "+str(reactions.pos().y())+" "+str(reactions.nodeBox.name)+" "+str(reactions.nodeBox.number)
#append reaction type to the end of the line
if isinstance(reactions,reactionAtoBNode)==True:
outputLine=outputLine+" AtoB"
if isinstance(reactions,reactionABtoCNode)==True:
outputLine=outputLine+" ABtoC"
if isinstance(reactions,reactionAtoBCNode)==True:
outputLine=outputLine+" AtoBC"
outputStream.append(outputLine)
#connections list
for connection in connectionsList:
outputLine='//connections '+str(connection.startNode.parentItem().nodeBox.name)+" "+str(connection.startNode.name)+" "+str(connection.endNode.parentItem().nodeBox.name)+" "+str(connection.endNode.name)
outputStream.append(outputLine)
#KMC parameters
outputLine='//KMCparams '+str(KMCParams[0])+" "+str(KMCParams[1])+" "+str(KMCParams[2])+" "+str(KMCParams[3])+" "+str(KMCParams[4])
outputStream.append(outputLine)
#population vector
populationVector=calcPopulationVector(KMCParams[0])
outputLine='//popVector'
for item in populationVector:
outputLine=outputLine+" "+str(item)
outputStream.append(outputLine)
#name vector
outputLine='//nameVector'
for species in speciesList:
outputLine=outputLine+" "+str(species.nodeBox.name)
outputStream.append(outputLine)
#rate vector
outputLine='//rateVector'
for reaction in reactionsList:
outputLine=outputLine+" "+str(reaction.nodeBox.number)
outputStream.append(outputLine)
#connectivity matrix
connectivityMatrix=np.zeros(shape=(len(reactionsList),len(speciesList)))
#iterate over all reactions (rows in connectivity matrix)
i=0
while i < len(reactionsList):
#iterate over plugs of reaction
for reactionChildItem in reactionsList[i].childItems():
if isinstance(reactionChildItem,plug):
#iterate over all species (columns in connectivity matrix)
j=0
while j< len(speciesList):
#check all plugs in given species
for speciesChildItem in speciesList[j].childItems():
if isinstance(speciesChildItem,plug):
#iterate over all connections to check if connection exists
for connection in connectionsList:
#check if connection's start and end plugs are identical to current reaction and species plugs
if connection.startNode==reactionChildItem and connection.endNode==speciesChildItem:
if reactionChildItem.mode=="in":
connectivityMatrix[i][j]-=1
if reactionChildItem.mode=="out":
connectivityMatrix[i][j]+=1
if connection.endNode==reactionChildItem and connection.startNode==speciesChildItem:
if reactionChildItem.mode=="in":
connectivityMatrix[i][j]-=1
if reactionChildItem.mode=="out":
connectivityMatrix[i][j]+=1
j+=1
i+=1
for line in connectivityMatrix:
outputLine='//connMatrix'
for item in line:
outputLine=outputLine+" "+str(item)
outputStream.append(outputLine)
return outputStream
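# Sketch of the serialized format produced above (all values are hypothetical):
#
#     //species 10.0 20.0 S1 1.0
#     //reactions 120.0 20.0 R1 10.0 AtoB
#     //connections R1 <plug name> S1 <plug name>
#     //KMCparams 100000 1 0.01 1 4.15e-20
#     //popVector 100000.0
#     //nameVector S1
#     //rateVector 10.0
#     //connMatrix -1.0
#
# The connectivity matrix has one row per reaction and one column per species;
# an entry is decremented for every connection to an "in" plug (reactant) and
# incremented for every connection to an "out" plug (product) of that reaction.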
def readInputStream(inputStream):
global speciesList
global reactionsList
global connectionsList
global speciesCounter
global reactionsCounter
global connectionsCounter
global KMCParams
speciesCounter=0
reactionsCounter=0
connectionsCounter=0
for line in inputStream:
lineList=line.split(" ")
if lineList[0] == '//species':
#generate new species based on this line information
objectName=lineList[3]
objectName=speciesNode(QPointF(float(lineList[1]),float(lineList[2])),lineList[3],float(lineList[4]))
speciesList.append(objectName)
speciesCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if lineList[0] =='//reactions':
#generate new reactions based on this line information
objectName=lineList[3]
if lineList[5]=='AtoB':
objectName=reactionAtoBNode(QPointF(float(lineList[1]),float(lineList[2])),lineList[3],float(lineList[4]))
if lineList[5]=='AtoBC':
objectName=reactionAtoBCNode(QPointF(float(lineList[1]),float(lineList[2])),lineList[3],float(lineList[4]))
if lineList[5]=='ABtoC':
objectName=reactionABtoCNode(QPointF(float(lineList[1]),float(lineList[2])),lineList[3],float(lineList[4]))
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if lineList[0] =='//connections':
#generate new connections based on this line information
#get plugs of startNode:
for species in speciesList+reactionsList:
for plugItem in species.childItems():
if isinstance(plugItem,plug):
if lineList[1] == species.nodeBox.name and lineList[2]==plugItem.name:
startPlug=plugItem
if lineList[3] == species.nodeBox.name and lineList[4]==plugItem.name:
endPlug=plugItem
objectName='connection'+str(connectionsCounter)
objectName=connection(startPlug,endPlug)
connectionsList.append(objectName)
connectionsCounter+=1
AppWindow.canvas.addItem(objectName)
if lineList[0]=='//KMCparams':
#generate KMC parameters based on this line information
#KMCParams=[100000,1,0.01,1,1]#total N of starting particles,repeats,store timestep,max time,volume
KMCParams[0]=int(lineList[1])
KMCParams[1]=int(lineList[2])
KMCParams[2]=float(lineList[3])
KMCParams[3]=float(lineList[4])
KMCParams[4]=float(lineList[5])
#calculate the height and width of node box
def getNodeWH(textH,titleTextW,numberTextW):
h=2.5*textH #2.5 because font "times" has leading -1 (text has one preceding empty line)
if numberTextW>titleTextW:
w=numberTextW+15
else:
w=titleTextW+15
if w<h:
w=h #if text and number are short, make it a rectangle for aesthetic reasons
return w,h
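# Small illustration (made-up font metrics): getNodeWH(textH=20, titleTextW=50,
# numberTextW=30) gives h = 2.5*20 = 50 and w = 50+15 = 65, so the node box is
# 65 x 50 px; if both texts were narrower than the height, the box would be
# squared off to h x h.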
# get width of text
def getTextWidth(text):
global nodeFont
fontMetrics=QFontMetrics(nodeFont)
w=fontMetrics.boundingRect(text).width()
return w
# get height of text
def getTextHeight(text):
global nodeFont
fontMetrics=QFontMetrics(nodeFont)
h=fontMetrics.boundingRect(text).height()
return h
#create node
def createNode(tool,position):
global reactionsCounter
global speciesCounter
global reactionsList
global speciesList
if tool=="unselected":
AppWindow.statusBar().showMessage("No tool selected",5000)
if tool=="species":
objectName='species'+str(speciesCounter)
objectTitle='S'+str(speciesCounter)
objectName=speciesNode(position,objectTitle,1.0)#create node; no of molecules=10000
speciesList.append(objectName)
speciesCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactAtoB":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionAtoBNode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactABtoC":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionABtoCNode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactAtoBC":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionAtoBCNode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactABtoCD":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionABtoCDNode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactAtoBCD":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionAtoBCDNode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
if tool=="reactABtoCDE":
objectName='reaction'+str(reactionsCounter)
objectTitle='R'+str(reactionsCounter)
objectName=reactionABtoCDENode(position,objectTitle,10)#create node; no of molecules=10000
reactionsList.append(objectName)
reactionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
#create connection
def createConnection():
global isConnecting
global connectionStart
global connectionEnd
global connectionsList
global connectionsCounter
if isinstance(connectionStart,plug) and isinstance(connectionEnd,plug):
legalConnection=True
#test if valid connection
if isConnecting==True:
#put in checks so that only legal connections are allowed
#1. You cannot connect node to itself
if connectionStart.parentItem()==connectionEnd.parentItem():
AppWindow.statusBar().showMessage("You cannot connect node to itself!",5000)
legalConnection=False
#2. You cannot create multiple connections between same plug pairs
for existingConnection in connectionsList:
if existingConnection.startNode==connectionStart and existingConnection.endNode ==connectionEnd:
AppWindow.statusBar().showMessage("You cannot create multiple connections between same plug pairs!",5000)
legalConnection=False
if existingConnection.endNode==connectionStart and existingConnection.startNode ==connectionEnd:
AppWindow.statusBar().showMessage("You cannot create multiple connections between same plug pairs!",5000)
legalConnection=False
#3. You can only connect different type of plugs
if connectionStart.mode==connectionEnd.mode:
AppWindow.statusBar().showMessage("You can only connect different type of plugs!",5000)
legalConnection=False
#4. You can only connect reactions to species and vice versa
if connectionStart.parentItem().nodeType==connectionEnd.parentItem().nodeType:
AppWindow.statusBar().showMessage("You can only connect reactions to species and vice versa!",5000)
legalConnection=False
#5. Only one connection is allowed per reaction plug
if connectionStart.parentItem().nodeType=='reaction':
for existingConnection in connectionsList:
if existingConnection.startNode==connectionStart or existingConnection.endNode ==connectionStart:
AppWindow.statusBar().showMessage("Only one connection is allowed per reaction plug!",5000)
legalConnection=False
if connectionEnd.parentItem().nodeType=='reaction':
for existingConnection in connectionsList:
if existingConnection.startNode==connectionEnd or existingConnection.endNode ==connectionEnd:
AppWindow.statusBar().showMessage("Only one connection is allowed per reaction plug!",5000)
legalConnection=False
if legalConnection==True:
#actually create the connection
objectName='connection'+str(connectionsCounter)
objectName=connection(connectionStart,connectionEnd)
connectionsList.append(objectName)
connectionsCounter+=1
AppWindow.canvas.addItem(objectName)
AppWindow.canvas.update()
isConnecting=False
connectionStart=None
connectionEnd=None
def runKMC():
global lasttVector
global lastPVector
global lastCVector
global KMCParams
global speciesList
global reactionsList
global connectionsList
global fileName
if fileName==None:
AppWindow.saveFile()
# generate population vector
populationVector=calcPopulationVector(KMCParams[0])
# generate rate constants vector
rateConstantsVector=np.array([])
for reaction in reactionsList:
rateConstantsVector=np.append(rateConstantsVector,reaction.nodeBox.number)
#generate connectivity matrix
connectivityMatrix=np.zeros(shape=(len(reactionsList),len(speciesList)))
#iterate over all reactions (rows in connectivity matrix)
i=0
while i < len(reactionsList):
#iterate over plugs of reaction
for reactionChildItem in reactionsList[i].childItems():
if isinstance(reactionChildItem,plug):
#iterate over all species (columns in connectivity matrix)
j=0
while j< len(speciesList):
#check all plugs in given species
for speciesChildItem in speciesList[j].childItems():
if isinstance(speciesChildItem,plug):
#iterate over all connections to check if connection exists
for connection in connectionsList:
#check if connection's start and end plugs are identical to current reaction and species plugs
if connection.startNode==reactionChildItem and connection.endNode==speciesChildItem:
if reactionChildItem.mode=="in":
connectivityMatrix[i][j]-=1
if reactionChildItem.mode=="out":
connectivityMatrix[i][j]+=1
if connection.endNode==reactionChildItem and connection.startNode==speciesChildItem:
if reactionChildItem.mode=="in":
connectivityMatrix[i][j]-=1
if reactionChildItem.mode=="out":
connectivityMatrix[i][j]+=1
j+=1
i+=1
lasttVector,lastPVector=KMC_engine.runKMC(populationVector,rateConstantsVector,connectivityMatrix,KMCParams[4],KMCParams[1],KMCParams[2],KMCParams[3])
#calculate concentration vector from population vector and volume
if len(lasttVector)>0 and len(lastPVector)>0:
lastCVector=np.empty(shape=lastPVector.shape)
x=0
while x< len(lastCVector[:,0]):
y=0
while y< len(lastCVector[x,:]):
lastCVector[x,y]=lastPVector[x,y]/(Na*KMCParams[4])
y+=1
x+=1
#write population vector output file
outPopFileName=fileName+'_population.csv'
POPOUT=open(outPopFileName, 'w')
POPOUT.write("t ")
for species in speciesList:
POPOUT.write(str(species.nodeBox.name)+" ")
POPOUT.write("\n")
for i in range(len(lasttVector)):
POPOUT.write(str(lasttVector[i])+" ")
for j in range(len(lastPVector[i])):
POPOUT.write(str(lastPVector[i,j])+" ")
POPOUT.write("\n")
POPOUT.close()
#write concentration vector output file
outConcFileName=fileName+'_concentration.csv'
CONCOUT=open(outConcFileName, 'w')
CONCOUT.write("t ")
for species in speciesList:
CONCOUT.write(str(species.nodeBox.name)+" ")
CONCOUT.write("\n")
for i in range(len(lasttVector)):
CONCOUT.write(str(lasttVector[i])+" ")
for j in range(len(lastCVector[i])):
CONCOUT.write(str(lastCVector[i,j])+" ")
CONCOUT.write("\n")
CONCOUT.close()
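# Note on the files written above (illustrative layout, space separated, one row
# per stored timestep):
#
#     t S1 S2
#     0.0 0.001 0.0
#     0.01 0.00098 2e-05
#
# <fileName>_population.csv holds particle numbers while
# <fileName>_concentration.csv holds the same data divided by Na*V (mol/L).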
###############
### Classes ###
###############
class PlotWindow(QMainWindow):
def __init__(self):
super(PlotWindow,self).__init__()
global lasttVector
global lastPVector
global speciesList
global lastCVector
#set central widget
self.centralWidget=QWidget()
self.setCentralWidget(self.centralWidget)
#set layout
self.HLayout=QHBoxLayout()
self.centralWidget.setLayout(self.HLayout)
self.setWindowTitle("Plotting results")
#generate figure canvas
self.fig=Figure((10.0,12.0),dpi=100)
self.canvas=FigureCanvas(self.fig)
self.canvas.setParent(self)
self.axes=self.fig.add_subplot(111)
#add matplotlib standard toolbar
self.matPlotLibToolbar=NavigationToolbar(self.canvas,self)
self.plotLayout=QVBoxLayout()
self.HLayout.addLayout(self.plotLayout)
self.plotLayout.addWidget(self.canvas)
self.plotLayout.addWidget(self.matPlotLibToolbar)
#set layout for buttons and list
self.VLayout=QVBoxLayout()
self.HLayout.addLayout(self.VLayout)
#add listview for selecting data series
self.listView=QListView()
self.listModel=QStandardItemModel()
self.createDataSeries()
self.listView.setModel(self.listModel)
self.VLayout.addWidget(self.listView)
#add button to display graph
self.testButton=QPushButton("show")
self.VLayout.addWidget(self.testButton)
self.testButton.clicked.connect(self.onShow)
def createDataSeries(self):
self.listModel.clear()
for species in speciesList:
item=QStandardItem(species.nodeBox.name)
item.setCheckState(Qt.Checked)
item.setCheckable(True)
self.listModel.appendRow(item)
def onShow(self):
self.axes.clear()
i=0
if len(lastCVector)!=0:
numberOfSpecies=len(lastCVector[0,:])
for row in range(self.listModel.rowCount()):
index=self.listModel.index(row,0)
if self.listModel.data(index,Qt.CheckStateRole)==QVariant(Qt.Checked):
#use the row index directly so the plotted column always matches the checked species
self.axes.scatter(lasttVector,lastCVector[:,row],label=speciesList[row].nodeBox.name)
self.axes.legend(loc='best')
self.canvas.draw()
class confirmWindow(QDialog):
def __init__(self,text):
super(confirmWindow,self).__init__()
self.setGeometry(100,100,50,50)
#set vertical layout
self.VLayout=QVBoxLayout()
self.setLayout(self.VLayout)
#display text
self.textDisplay=QLabel()
self.textDisplay.setText(text)
self.VLayout.addWidget(self.textDisplay)
#create horizontal layout
self.HLayout=QHBoxLayout()
self.VLayout.addLayout(self.HLayout)
#create OK button
self.OKButton=QPushButton("OK",self)
self.OKButton.clicked.connect(self.OKPressed)
self.HLayout.addWidget(self.OKButton)
#create OK button
self.CancelButton=QPushButton("Cancel",self)
self.CancelButton.clicked.connect(self.CancelPressed)
self.HLayout.addWidget(self.CancelButton)
#display window
self.exec()
def OKPressed(self,pressed):
self.close()
def CancelPressed(self,pressed):
self.close()
# class for editing KMC parameters
class editKMCParams(QDialog):
def __init__(self):
super(editKMCParams,self).__init__()
self.setGeometry(100,100,400,200)
global KMCParams
#KMCParams=[100000,1,0.01,1,1]#total N of starting particles,repeats,store timestep,max time,volume
#set vertical layout
self.VLayout=QVBoxLayout()
self.setLayout(self.VLayout)
#layout for time interval
self.tIntervalLine=QHBoxLayout()
self.VLayout.addLayout(self.tIntervalLine)
self.tIntervalLabel=QLabel()
self.tIntervalLabel.setText("Timestep for data storage (s):")
self.tIntervalLine.addWidget(self.tIntervalLabel)
self.tIntervalEdit=QLineEdit()
self.tIntervalEdit.setText(str(KMCParams[2]))
self.tIntervalLine.addWidget(self.tIntervalEdit)
#layout for maximum allowed time
self.maxTLine=QHBoxLayout()
self.VLayout.addLayout(self.maxTLine)
self.maxTLabel=QLabel()
self.maxTLabel.setText("Max simulation time (s):")
self.maxTLine.addWidget(self.maxTLabel)
self.maxTEdit=QLineEdit()
self.maxTEdit.setText(str(KMCParams[3]))
self.maxTLine.addWidget(self.maxTEdit)
#layout for total number of starting particles
self.totalParticlesLine=QHBoxLayout()
self.VLayout.addLayout(self.totalParticlesLine)
self.totalParticlesLabel=QLabel()
self.totalParticlesLabel.setText("Total number of starting molecules:")
self.totalParticlesLine.addWidget(self.totalParticlesLabel)
self.totalParticlesEdit=QLineEdit()
self.totalParticlesEdit.setText(str(KMCParams[0]))
self.totalParticlesLine.addWidget(self.totalParticlesEdit)
#layout for repeats
self.repeatsLine=QHBoxLayout()
self.VLayout.addLayout(self.repeatsLine)
self.repeatsLabel=QLabel()
self.repeatsLabel.setText("Number of repeats:")
self.repeatsLine.addWidget(self.repeatsLabel)
self.repeatsEdit=QLineEdit()
self.repeatsEdit.setText(str(KMCParams[1]))
self.repeatsLine.addWidget(self.repeatsEdit)
#layout for displaying volume
self.VolumeLine=QHBoxLayout()
self.VLayout.addLayout(self.VolumeLine)
self.VolumeLabel=QLabel()
self.VolumeLabel.setText("Simulation volume (L):")
self.VolumeLine.addWidget(self.VolumeLabel)
self.VolumeValue=QLabel()
popList=calcPopulationVector(KMCParams[0])
self.VolumeValue.setText(str(KMCParams[4]))
self.VolumeLine.addWidget(self.VolumeValue)
#layout for buttons line
self.ButtonsLine=QHBoxLayout()
self.VLayout.addLayout(self.ButtonsLine)
#create OK button
self.OKButton=QPushButton("OK",self)
self.OKButton.clicked.connect(self.OKPressed)
self.ButtonsLine.addWidget(self.OKButton)
#create Cancel button
self.CancelButton=QPushButton("Cancel",self)
self.CancelButton.clicked.connect(self.CancelPressed)
self.ButtonsLine.addWidget(self.CancelButton)
#launch window
self.exec()
def OKPressed(self,pressed):
source=self.sender()
validOutput=True
global KMCParams
try:
float(self.tIntervalEdit.text())
except:
invalidWindow=QMessageBox.information(self,"Error","time interval must be a number")
validOutput=False
try:
float(self.maxTEdit.text())
except:
invalidWindow=QMessageBox.information(self,"Error","maximum time must be a number")
validOutput=False
try:
int(self.totalParticlesEdit.text())
except:
invalidWindow=QMessageBox.information(self,"Error","total number of starting molecules must be an integer")
validOutput=False
try:
int(self.repeatsEdit.text())
except:
            invalidWindow=QMessageBox.information(self,"Error","number of repeats must be an integer")
validOutput=False
if validOutput==True:
#KMCParams=[100000,1,0.01,1,1]#total N of starting particles,repeats,store timestep,max time,volume
KMCParams[0]=int(self.totalParticlesEdit.text())
KMCParams[1]=int(self.repeatsEdit.text())
KMCParams[2]=float(self.tIntervalEdit.text())
KMCParams[3]=float(self.maxTEdit.text())
self.close()
def CancelPressed(self,pressed):
#do nothing when cancel is pressed - delete widget and do not save changes
self.close()
# class for editing node objects
class editNodes(QDialog):
def __init__(self,node,nodeType,name,number):
super(editNodes,self).__init__()
self.setGeometry(100,100,200,150)
self.originNode=node
self.originType=nodeType
#set vertical layout
self.VLayout=QVBoxLayout()
self.setLayout(self.VLayout)
#layout for node Text and edit
self.textLine=QHBoxLayout()
self.VLayout.addLayout(self.textLine)
self.nameLabel=QLabel()
self.nameLabel.setText("Name:")
self.textLine.addWidget(self.nameLabel)
self.nameEdit=QLineEdit()
self.nameEdit.setText(name)
self.textLine.addWidget(self.nameEdit)
#layout for node number and edit
self.numberLine=QHBoxLayout()
self.VLayout.addLayout(self.numberLine)
self.numberLabel=QLabel()
if nodeType=="species":
self.numberLabel.setText("Concentration (mol/L):")
if nodeType=="reaction":
self.numberLabel.setText("Rate constant:")
self.numberLine.addWidget(self.numberLabel)
self.numberEdit=QLineEdit()
self.numberEdit.setText(str(number))
self.numberLine.addWidget(self.numberEdit)
#layout for buttons line
self.ButtonsLine=QHBoxLayout()
self.VLayout.addLayout(self.ButtonsLine)
#create OK button
self.OKButton=QPushButton("OK",self)
self.OKButton.clicked.connect(self.OKPressed)
self.ButtonsLine.addWidget(self.OKButton)
#create Cancel button
self.CancelButton=QPushButton("Cancel",self)
self.CancelButton.clicked.connect(self.CancelPressed)
self.ButtonsLine.addWidget(self.CancelButton)
#launch window
self.exec()
def OKPressed(self,pressed):
source=self.sender()
self.validName=True
self.validNumber=True
global reactionsList
global speciesList
#check if name field is unique
for species in speciesList:
if species.nodeBox==self.originNode:
pass
elif species.nodeBox.name==self.nameEdit.text():
invalidWindow=QMessageBox.information(self,"Error","name in use")
self.validName=False
for reactions in reactionsList:
if reactions.nodeBox==self.originNode:
pass
elif reactions.nodeBox.name==self.nameEdit.text():
invalidWindow=QMessageBox.information(self,"Error","name in use")
self.validName=False
#check if number is int or float (for species and reaction, respectively)
if self.originType=="species":
try:
float(self.numberEdit.text())
#check if concentration is positive or 0
if float(self.numberEdit.text()) <0:
self.validNumber=False
invalidWindow=QMessageBox.information(self,"Error","concentration must be positive or 0")
except ValueError:
invalidWindow=QMessageBox.information(self,"Error","concentration must be floating point number")
self.validNumber=False
if self.originType=="reaction":
try:
float(self.numberEdit.text())
#check if rate constant is positive or 0
if float(self.numberEdit.text()) <0:
self.validNumber=False
invalidWindow=QMessageBox.information(self,"Error","rate constant must be positive or 0")
except ValueError:
invalidWindow=QMessageBox.information(self,"Error","rate constant must be integer or floating point number")
self.validNumber=False
#if all values are acceptable, save changes and close widget
if self.validName==True and self.validNumber==True:
self.originNode.name=self.nameEdit.text()
if self.originType=="species":
self.originNode.number=float(self.numberEdit.text())
if self.originType=="reaction":
self.originNode.number=float(self.numberEdit.text())
self.originNode.updateNode()
self.close()
def CancelPressed(self,pressed):
        #do nothing when cancel is pressed - delete widget and do not save changes
self.close()
# general class for all node objects
class nodeBox(QGraphicsItem):
def __init__(self,parent,position,objectTitle,number):
global nodeFont
global plugSideLength
global plugWidth
global reactionsList
global speciesList
self.parent=parent
super(nodeBox,self).__init__()
self.createNode(self,position,objectTitle,number)
def createNode(self,parent,position,objectTitle,number):
#add central box
self.name=objectTitle
self.number=number
self.textH=getTextHeight(self.name)
self.titleTextW=getTextWidth(self.name)
self.numberTextW=getTextWidth(str(self.number))
self.nodeBoxW,self.nodeBoxH=getNodeWH(self.textH,self.titleTextW,self.numberTextW)
#calculate node width and height
self.width=self.nodeBoxW+2*plugWidth
self.height=self.nodeBoxH
        #the node box is positioned by its parent item, so keep it at the parent's local origin
self.setPos(0,0)
def boundingRect(self):
return QRectF(0,0,self.width,self.height)
def paint(self,painter,option,widget):
global nodeFont
global plugWidth
painter.setRenderHint(QPainter.Antialiasing)
rect=QPainterPath()
brush=QBrush(self.boxColor)
rect.addRoundedRect(QRectF(0+plugWidth,0,self.nodeBoxW,self.nodeBoxH),10,10)
painter.setPen(QPen(Qt.SolidLine))
painter.setFont(nodeFont)
painter.fillPath(rect,self.boxColor)
painter.drawPath(rect)
#painter.fillRect(0+plugWidth,0,self.nodeBoxW,self.nodeBoxH,QBrush(self.boxColor))
painter.drawText(int(0+plugWidth+self.nodeBoxW*0.5-self.titleTextW*0.5),0+self.textH,self.name)
painter.drawText(int(0+plugWidth+self.nodeBoxW*0.5-self.numberTextW*0.5),0+2*self.textH,str(self.number))
if self.parent.selected==True:
painter.setPen(QPen(Qt.DashLine))
painter.drawRect(self.boundingRect())
self.update()
def contextMenuEvent(self,event):
menu=QMenu()
editAction=QAction('Edit',None)
editAction.triggered.connect(self.editNode)
menu.addAction(editAction)
deleteAction=QAction('Delete',None)
deleteAction.triggered.connect(self.deleteNode)
menu.addAction(deleteAction)
if self.parentItem().selected==True:
selectionText='Unselect'
else:
selectionText='Select'
selectAction=QAction(selectionText,None)
selectAction.triggered.connect(self.selectNode)
menu.addAction(selectAction)
menu.exec_(event.screenPos())
def editNode(self):
editWidget=editNodes(self,self.parent.nodeType,self.name, self.number)
def selectNode(self):
if self.parentItem().selected==True:
self.parentItem().selected=False
else:
self.parentItem().selected=True
def deleteNode(self):
self.deleteList=[]
#clean up all connections related to this node
for connection in connectionsList:
if connection.startNode.parentItem() == self.parentItem() or connection.endNode.parentItem() == self.parentItem():
connection.selected=True
self.deleteList.append(connection)
for connection in self.deleteList:
connectionsList.remove(connection)
AppWindow.canvas.removeItem(connection)
#if parent object is species, clean up speciesList
if isinstance(self.parentItem(),speciesNode):
for node in speciesList:
if node==self.parentItem():
speciesList.remove(node)
#if parent object is reaction, clean up reactionsList
        if isinstance(self.parentItem(),(reactionAtoBNode,reactionAtoBCNode,reactionABtoCNode,reactionABtoCDNode,reactionAtoBCDNode,reactionABtoCDENode)):
for node in reactionsList:
if node==self.parentItem():
reactionsList.remove(node)
AppWindow.canvas.removeItem(self.parentItem())
def updateNode(self):
self.textH=getTextHeight(self.name)
self.titleTextW=getTextWidth(self.name)
self.numberTextW=getTextWidth(str(self.number))
self.nodeBoxW,self.nodeBoxH=getNodeWH(self.textH,self.titleTextW,self.numberTextW)
self.width=self.nodeBoxW+2*plugWidth
self.height=self.nodeBoxH
#update position of outgoing plugs
for item in self.parentItem().childItems():
if isinstance(item,plug) and item.mode=="out":
item.x=self.width-plugWidth
item.updateCoords()
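# Note on node composition (comment added for clarity): every species/reaction
# item on the canvas is a thin QGraphicsItem wrapper (speciesNode, reaction*Node)
# whose children are one nodeBox (the rounded rectangle showing name and number)
# and one or more plug items (the triangular in/out connectors). The wrapper
# handles positioning, while nodeBox handles painting, context menus and deletion.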
#species node class
class speciesNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(speciesNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global speciesFillColor
self.selected=False
self.nodeType="species"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=speciesFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn=plug(0,self.nodeBox.height/2-plugSideLength/2,"in","in1")
self.nodePlugIn.setParentItem(self)
self.nodePlugOut=plug(self.nodeBox.width-plugWidth,self.nodeBox.height/2-plugSideLength/2,"out","out1")
self.nodePlugOut.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction A to B node class
class reactionAtoBNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionAtoBNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn=plug(0,self.nodeBox.height/2-plugSideLength/2,"in","in1")
self.nodePlugIn.setParentItem(self)
self.nodePlugOut=plug(self.nodeBox.width-plugWidth,self.nodeBox.height/2-plugSideLength/2,"out","out1")
self.nodePlugOut.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction AB to C node class
class reactionABtoCNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionABtoCNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn1=plug(0,(self.nodeBox.height-2*plugSideLength)/3,"in","in1")
self.nodePlugIn1.setParentItem(self)
self.nodePlugIn2=plug(0,plugSideLength+2*(self.nodeBox.height-2*plugSideLength)/3,"in","in2")
self.nodePlugIn2.setParentItem(self)
self.nodePlugOut=plug(self.nodeBox.width-plugWidth,self.nodeBox.height/2-plugSideLength/2,"out","out1")
self.nodePlugOut.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction A to BC node class
class reactionAtoBCNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionAtoBCNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn=plug(0,self.nodeBox.height/2-plugSideLength/2,"in","in1")
self.nodePlugIn.setParentItem(self)
self.nodePlugOut1=plug(self.nodeBox.width-plugWidth,(self.nodeBox.height-2*plugSideLength)/3,"out","out1")
self.nodePlugOut1.setParentItem(self)
self.nodePlugOut2=plug(self.nodeBox.width-plugWidth,plugSideLength+2*(self.nodeBox.height-2*plugSideLength)/3,"out","out2")
self.nodePlugOut2.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,0+position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction AB to CD node class
class reactionABtoCDNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionABtoCDNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn1=plug(0,(self.nodeBox.height-2*plugSideLength)/3,"in","in1")
self.nodePlugIn1.setParentItem(self)
self.nodePlugIn2=plug(0,plugSideLength+2*(self.nodeBox.height-2*plugSideLength)/3,"in","in2")
self.nodePlugIn2.setParentItem(self)
self.nodePlugOut1=plug(self.nodeBox.width-plugWidth,(self.nodeBox.height-2*plugSideLength)/3,"out","out1")
self.nodePlugOut1.setParentItem(self)
self.nodePlugOut2=plug(self.nodeBox.width-plugWidth,plugSideLength+2*(self.nodeBox.height-2*plugSideLength)/3,"out","out2")
self.nodePlugOut2.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,0+position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction A to BCD node class
class reactionAtoBCDNode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionAtoBCDNode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
        #single input plug for an A --> B+C+D reaction
        self.nodePlugIn=plug(0,self.nodeBox.height/2-plugSideLength/2,"in","in1")
        self.nodePlugIn.setParentItem(self)
self.nodePlugOut1=plug(self.nodeBox.width-plugWidth,(self.nodeBox.height-3*plugSideLength)/4,"out","out1")
self.nodePlugOut1.setParentItem(self)
self.nodePlugOut2=plug(self.nodeBox.width-plugWidth,plugSideLength+2*(self.nodeBox.height-3*plugSideLength)/4,"out","out2")
self.nodePlugOut2.setParentItem(self)
self.nodePlugOut3=plug(self.nodeBox.width-plugWidth,2*plugSideLength+3*(self.nodeBox.height-3*plugSideLength)/4,"out","out3")
self.nodePlugOut3.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,0+position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
#reaction AB to CDE node class
class reactionABtoCDENode(QGraphicsItem):
def __init__(self,position,objectTitle,number):
super(reactionABtoCDENode,self).__init__()
global nodeFont
global plugSideLength
global plugWidth
global reactionsFillColor
self.selected=False
self.nodeType="reaction"
self.createNode(position,objectTitle,number)
self.setZValue(1)
def createNode(self,position,objectTitle,number):
self.nodeBox=nodeBox(self,position,objectTitle,number)
self.nodeBox.setParentItem(self)
self.nodeBox.boxColor=reactionsFillColor
self.setPos(int(position.x()-self.nodeBox.width/2),int(position.y()-self.nodeBox.height/2))
self.nodePlugIn1=plug(0,(self.nodeBox.height-2*plugSideLength)/3,"in","in1")
self.nodePlugIn1.setParentItem(self)
self.nodePlugIn2=plug(0,plugSideLength+2*(self.nodeBox.height-2*plugSideLength)/3,"in","in2")
self.nodePlugIn2.setParentItem(self)
self.nodePlugOut1=plug(self.nodeBox.width-plugWidth,(self.nodeBox.height-3*plugSideLength)/4,"out","out1")
self.nodePlugOut1.setParentItem(self)
self.nodePlugOut2=plug(self.nodeBox.width-plugWidth,plugSideLength+2*(self.nodeBox.height-3*plugSideLength)/4,"out","out2")
self.nodePlugOut2.setParentItem(self)
self.nodePlugOut3=plug(self.nodeBox.width-plugWidth,2*plugSideLength+3*(self.nodeBox.height-3*plugSideLength)/4,"out","out3")
self.nodePlugOut3.setParentItem(self)
def boundingRect(self):
return self.nodeBox.boundingRect()
def updateCoords(self,position):
self.setPos(position.x()-self.nodeBox.width/2,0+position.y()-self.nodeBox.height/2)
def paint(self,painter,option,widget):
pass
# class for plug items
class plug(QGraphicsItem):
def __init__(self,x,y,mode,name):
super(plug,self).__init__()
self.x=x
self.y=y
self.mode=mode
self.name=name
global plugSideLength
self.centre=QPointF(self.x+0.5*plugWidth,self.y+plugSideLength/2)
self.triangle=QPolygonF()
self.triangle.append(QPointF(self.x,self.y))
self.triangle.append(QPointF(self.x,self.y+plugSideLength))
self.triangle.append(QPointF(self.x+plugSideLength*(math.sqrt(3)/2),self.y+plugSideLength/2))
def boundingRect(self):
return QRectF(self.x,self.y,plugSideLength*(math.sqrt(3)/2),plugSideLength)
def paint(self,painter,option,widget):
painter.setBrush(QBrush(QColor(150,150,150)))
painter.setPen(QPen(Qt.SolidLine))
painter.drawPolygon(self.triangle)
#painter.drawEllipse(self.centre,2,2)
self.update()
def updateCoords(self):
self.triangle=QPolygonF()
self.triangle.append(QPointF(self.x,self.y))
self.triangle.append(QPointF(self.x,self.y+plugSideLength))
self.triangle.append(QPointF(self.x+plugSideLength*(math.sqrt(3)/2),self.y+plugSideLength/2))
self.centre=QPointF(self.x+0.5*plugWidth,self.y+plugSideLength/2)
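# Note on plug geometry (comment added for clarity): a plug is a small
# right-pointing triangle of side plugSideLength anchored at (self.x, self.y)
# within its parent node; self.centre is the anchor point that connection lines
# are drawn to, and self.mode ("in"/"out") together with self.name identify the port.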
# class for connection items
class connection(QGraphicsItem):
def __init__(self,startNode,endNode):
super(connection,self).__init__()
self.startNode=startNode
self.endNode=endNode
self.selected=False
self.itemIsMovable=True
global lineInteractionRange
global connectionsList
        self.setZValue(0)
def boundingRect(self):
#get the top left corner coordinates and height/width of connection
if self.startNode.scenePos().x()+self.startNode.centre.x()<=self.endNode.scenePos().x()+self.endNode.centre.x():
self.topCornerX=self.startNode.scenePos().x()+self.startNode.centre.x()-lineInteractionRange
self.bottomCornerX=self.endNode.scenePos().x()+self.endNode.centre.x()+lineInteractionRange
else:
self.topCornerX=self.endNode.scenePos().x()+self.endNode.centre.x()-lineInteractionRange
self.bottomCornerX=self.startNode.scenePos().x()+self.startNode.centre.x()+lineInteractionRange
        self.boundingRectWidth=self.bottomCornerX-self.topCornerX
if self.startNode.scenePos().y()+self.startNode.centre.y()<=self.endNode.scenePos().y()+self.endNode.centre.y():
self.topCornerY=self.startNode.scenePos().y()+self.startNode.centre.y()-lineInteractionRange
self.bottomCornerY=self.endNode.scenePos().y()+self.endNode.centre.y()+lineInteractionRange
else:
self.topCornerY=self.endNode.scenePos().y()+self.endNode.centre.y()-lineInteractionRange
self.bottomCornerY=self.startNode.scenePos().y()+self.startNode.centre.y()+lineInteractionRange
        self.boundingRectHeight=self.bottomCornerY-self.topCornerY
        return QRectF(self.topCornerX,self.topCornerY,self.boundingRectWidth,self.boundingRectHeight)
def paint(self,painter,option,widget):
#painter.setBrush(QBrush(QColor(200,150,150)))
painter.setPen(QPen(Qt.SolidLine))
painter.drawLine(self.startNode.scenePos().x()+self.startNode.centre.x(),self.startNode.scenePos().y()+self.startNode.centre.y(),self.endNode.scenePos().x()+self.endNode.centre.x(),self.endNode.scenePos().y()+self.endNode.centre.y())
self.selectionArea=self.createSelectionArea()
if self.selected==True:
painter.setPen(QPen(Qt.DashLine))
painter.drawPolygon(self.selectionArea)
self.update()
def createSelectionArea(self):
if self.endNode.scenePos().y()+self.endNode.centre.y()-self.startNode.scenePos().y()-self.startNode.centre.y() ==0:
slope =1
else:
slope=(self.endNode.scenePos().x()+self.endNode.centre.x()-self.startNode.scenePos().x()-self.startNode.centre.x())/(self.endNode.scenePos().y()+self.endNode.centre.y()-self.startNode.scenePos().y()-self.startNode.centre.y())
slopeRadians=math.atan(slope)
mouseInteractionBox=QPolygonF()
point1x=self.startNode.scenePos().x()+self.startNode.centre.x()+math.sqrt(lineInteractionRange**2/(1+slope**2))
point1y=self.startNode.scenePos().y()+self.startNode.centre.y()+(-1)*slope*math.sqrt(lineInteractionRange**2/(1+slope**2))
mouseInteractionBox.append(QPointF(point1x,point1y))
point2x=self.startNode.scenePos().x()+self.startNode.centre.x()-math.sqrt(lineInteractionRange**2/(1+slope**2))
point2y=self.startNode.scenePos().y()+self.startNode.centre.y()-(-1)*slope*math.sqrt(lineInteractionRange**2/(1+slope**2))
mouseInteractionBox.append(QPointF(point2x,point2y))
point3x=self.endNode.scenePos().x()+self.endNode.centre.x()-math.sqrt(lineInteractionRange**2/(1+slope**2))
point3y=self.endNode.scenePos().y()+self.endNode.centre.y()-(-1)*slope*math.sqrt(lineInteractionRange**2/(1+slope**2))
mouseInteractionBox.append(QPointF(point3x,point3y))
point4x=self.endNode.scenePos().x()+self.endNode.centre.x()+math.sqrt(lineInteractionRange**2/(1+slope**2))
point4y=self.endNode.scenePos().y()+self.endNode.centre.y()+(-1)*slope*math.sqrt(lineInteractionRange**2/(1+slope**2))
mouseInteractionBox.append(QPointF(point4x,point4y))
return mouseInteractionBox
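    # Note on the selection area (comment added for clarity): the four points above
    # offset both line endpoints sideways by roughly lineInteractionRange pixels,
    # giving a thin quadrilateral around the drawn line. contextMenuEvent and the
    # scene's mouse handlers hit-test against this polygon instead of the full
    # bounding rectangle.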
def contextMenuEvent(self,event):
menu=QMenu()
deleteAction=QAction('Delete',None)
deleteAction.triggered.connect(self.deleteConnection)
menu.addAction(deleteAction)
if self.selected==True:
selectionText='Unselect'
else:
selectionText='Select'
selectAction=QAction(selectionText,None)
selectAction.triggered.connect(self.selectConnection)
menu.addAction(selectAction)
if self.selectionArea.containsPoint(event.scenePos(),Qt.OddEvenFill):
menu.exec_(event.screenPos())
def selectConnection(self):
if self.selected==True:
self.selected=False
else:
self.selected=True
def deleteConnection(self):
#clean up connectionsList
for connection in connectionsList:
if connection==self:
connectionsList.remove(connection)
AppWindow.canvas.removeItem(self)
selectableObjects=(speciesNode,reactionAtoBNode,reactionAtoBCNode,reactionABtoCNode,reactionABtoCDNode,reactionAtoBCDNode,reactionABtoCDENode,connection)
# Graphics scene
class DrawingArea(QGraphicsScene):
def __init__(self,parent):
super(DrawingArea,self).__init__(parent)
self.setSceneRect(0,0,1000,1000)
def mousePressEvent(self,event):
global movingItem
global isMoving
global isConnecting
global connectionStart
global connectionEnd
self.clickedItem=self.itemAt(event.scenePos(),QTransform())
#if event.button()==Qt.RightButton:
if event.button()==Qt.LeftButton:
self.connectionPresent=False
self.nodePresent=False
self.plugPresent=False
#check what items are at mouse press position
for items in self.items(event.scenePos()):
if isinstance(items,plug)==True and self.plugPresent==False:
self.plugPresent=True
if isinstance(items,connection)==True and items.selectionArea.containsPoint(event.scenePos(),Qt.OddEvenFill)and self.connectionPresent==False:
self.connectionPresent=True
if isinstance(items,nodeBox)==True and self.nodePresent==False:
self.nodePresent=True
#if clicked on empty space with node creation tool, create that node
if self.itemAt(event.scenePos(),QTransform()) == None and AppWindow.canvas.currentTool !="unselected":
createNode(AppWindow.canvas.currentTool,event.scenePos())
#if clicked on plug and not on connection, create connection
if self.plugPresent == True and self.connectionPresent == False:
if isinstance(self.itemAt(event.scenePos(),QTransform()),plug):
isConnecting=True
connectionStart=self.itemAt(event.scenePos(),QTransform())
else:
print("should create connection but itemAt scenePos is not plug --> zlevel issue")
#if clicked on node and not on plug or connection, move node
if self.nodePresent ==True and self.connectionPresent==False and self.plugPresent==False:
if isinstance(self.itemAt(event.scenePos(),QTransform()),nodeBox)==True:
isMoving=True
movingItem=self.itemAt(event.scenePos(),QTransform()).parentItem()
else:
print("should be moving item but itemAt scenePos is not nodeBox -->zlevel issue")
def mouseMoveEvent(self,event):
global movingItem
global connectionsList
if movingItem != None:
movingItem.updateCoords(event.scenePos())
for connection in connectionsList:
connection.prepareGeometryChange()
def mouseReleaseEvent(self,event):
global movingItem
global isMoving
global isConnecting
global connectionEnd
if isMoving==True:
isMoving=False
movingItem=None
self.clickedItem=self.itemAt(event.scenePos(),QTransform())
if isinstance(self.clickedItem,plug) and isConnecting==True:
connectionEnd=self.clickedItem
createConnection()
def mouseDoubleClickEvent(self,event):
global selectableObjects
if event.button()==Qt.LeftButton:
self.clickedItem=self.itemAt(event.scenePos(),QTransform())
#if clicked on plug, get parent item
if isinstance(self.clickedItem,nodeBox):
self.clickedItem=self.clickedItem.parentItem()
if isinstance(self.clickedItem,connection) and self.clickedItem.selectionArea.containsPoint(event.scenePos(),Qt.OddEvenFill):
pass
#if double click on selectable objects, toggle selection
if isinstance(self.clickedItem, selectableObjects):
if self.clickedItem.selected==False:
self.clickedItem.selected=True
else:
self.clickedItem.selected=False
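# Note on the interaction state (comment added for clarity): mousePressEvent
# decides between three modes based on what lies under the cursor - creating a
# node (empty space with an active tool), starting a connection (a plug was
# clicked), or dragging a node (a nodeBox was clicked). The module-level globals
# isMoving/movingItem and isConnecting/connectionStart/connectionEnd carry that
# state until mouseReleaseEvent either ends the drag or calls createConnection().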
#class for the main application window
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
global TOOLS
self.initUI()
#add functionalities not defined in initUI module
self.canvas.currentTool="unselected"
#set canvas.currentTool based on the buttons pressed
for tool in TOOLS:
btn=getattr(self, '%sButton' % tool)
btn.pressed.connect(lambda tool=tool: self.setTool(tool))
def setTool(self,tool): #function for storing current tool
self.canvas.currentTool=tool
def clearCanvas(self): #function for clearing all objects - new document
confirmWindow=QMessageBox.question(self, '', "Clear all objects, are you sure?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if confirmWindow == QMessageBox.Yes:
global speciesCounter
global speciesList
global reactionsCounter
global reactionsList
global connectionsCounter
global connectionsList
global isMoving
global movingItem
global isConnecting
global connectionStart
global connectionEnd
speciesCounter=1
speciesList=[]
reactionsCounter=1
reactionsList=[]
connectionsCounter=1
connectionsList=[]
isMoving=False
movingItem=None
isConnecting=False
connectionStart=None
connectionEnd=None
self.canvas.clear()
def saveFile(self): #function for saving all objects to file
global fileName
saveDialog=QFileDialog()
saveDialog.setDefaultSuffix('kmc')
saveDialog.setAcceptMode(QFileDialog.AcceptSave)
saveDialog.setNameFilters(['kinetic Monte Carlo (*.kmc)'])
saveDialog.setOptions(QFileDialog.DontUseNativeDialog)
if saveDialog.exec_() == QDialog.Accepted:
            #split the selected path into name and extension; the extension may be missing
            nameParts=saveDialog.selectedFiles()[0].split(".")
            filename=nameParts[0]
            extension=nameParts[1] if len(nameParts)>1 else ""
            if extension != saveDialog.defaultSuffix():
                print('wrong extension, "',extension,'", correcting')
                saveFileName=filename+'.'+saveDialog.defaultSuffix()
            else:
                saveFileName=saveDialog.selectedFiles()[0]
            fileName=filename
#save all reactions
outputStream=generateOutputStream()
file=open(saveFileName,"w+")
for line in outputStream:
file.write(str(line)+"\n")
file.close()
def loadFile(self): #function for loading objects from file
global fileName
global speciesCounter
global lastCVector
global lastPVector
global lasttVector
loadDialog=QFileDialog()
loadDialog.setDefaultSuffix('kmc')
loadDialog.setAcceptMode(QFileDialog.AcceptOpen)
loadDialog.setNameFilters(['kinetic Monte Carlo (*.kmc)'])
loadDialog.setOptions(QFileDialog.DontUseNativeDialog)
if loadDialog.exec_() == QDialog.Accepted:
filename=loadDialog.selectedFiles()[0]
fileName=loadDialog.selectedFiles()[0].split(".")[0]
with open(filename,"r") as inputFile:
inputStream=[]
line=inputFile.readline()
while line:
line=line.rstrip()
inputStream.append(line)
line=inputFile.readline()
self.clearCanvas()
readInputStream(inputStream)
#read previous simulation data from file if present
populationFilename=fileName+"_population.csv"
try:
with open(populationFilename,"r") as popInput:
popInStream=[]
line=popInput.readline()
while line:
line=line.rstrip()
popInStream.append(line)
line=popInput.readline()
timeVector=np.array([])
popVector=np.array([])
lineCounter=0
for line in popInStream:
lineList=line.split(" ")
if lineCounter>0:
timeVector=np.append(timeVector,float(lineList[0]))
lineList.pop(0)
if lineCounter==1:
popVector=np.asarray(lineList)
if lineCounter>1:
popVector=np.vstack([popVector,np.asarray(lineList)])
lineCounter+=1
if lineCounter==0:
nameVector=lineList
lineCounter+=1
if len(nameVector)!=speciesCounter+1:
print("incompatible population vector file")
                    lastPVector=popVector.astype(float)
lasttVector=timeVector
#calculate concentration vector from population vector and volume
if len(lasttVector)>0 and len(lastPVector)>0:
lastCVector=np.empty(shape=lastPVector.shape)
x=0
while x< len(lastCVector[:,0]):
y=0
while y< len(lastCVector[x,:]):
lastCVector[x,y]=lastPVector[x,y]/(Na*KMCParams[4])
y+=1
x+=1
except:
print("previous simulation data not available")
def showPlot(self):
self.plotWindow=PlotWindow()
self.plotWindow.show()
def initUI(self):
global speciesList
global reactionsList
global connectionsList
global lasttVector
global lastPVector
#super(MainWindow,self).__init__()
self.resize(800, 800)
self.centralwidget = QGraphicsView(self)
self.centralwidget.setObjectName("centralwidget")
self.setCentralWidget(self.centralwidget)
#add QGraphicsScene widget to draw on
self.canvas=DrawingArea(self)
self.centralwidget.setScene(self.canvas)
self.setMouseTracking(True)
# build menubar
self.mainMenu=QMainWindow.menuBar(self)
self.mainMenu.setNativeMenuBar(False)
# build file menu
self.menuFile = self.mainMenu.addMenu('File')
self.actionNew = QAction('New',self)
self.actionSave=QAction('Save',self)
self.actionOpen = QAction('Open',self)
self.actionExit = QAction('Exit',self)
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionExit)
self.actionNew.triggered.connect(self.clearCanvas)
self.actionSave.triggered.connect(self.saveFile)
self.actionOpen.triggered.connect(self.loadFile)
# build edit menu
self.menuEdit = self.mainMenu.addMenu('Edit')
self.actionEditReactTable = QAction('Reaction table',self)
self.actionKMCParams = QAction('KMC parameters',self)
self.actionRun = QAction('Run',self)
#self.menuEdit.addAction(self.actionEditReactTable)
self.menuEdit.addAction(self.actionKMCParams)
self.menuEdit.addAction(self.actionRun)
self.actionRun.triggered.connect(runKMC)
self.actionKMCParams.triggered.connect(editKMC)
self.plotResults=QAction('Plot results',self)
self.menuEdit.addAction(self.plotResults)
self.plotResults.triggered.connect(self.showPlot)
#build toolbar
self.toolBar = QToolBar()
self.addToolBar(Qt.TopToolBarArea, self.toolBar)
# add button for A-->B reaction
self.reactAtoBButton = QPushButton(self)
self.reactAtoBButton.setObjectName("reactAtoBButton")
self.reactAtoBButton.setCheckable(True)
self.AtoBIcon = QIcon()
self.AtoBIcon.addPixmap(QPixmap("icons/AtoB.png"), QIcon.Normal, QIcon.Off)
self.reactAtoBButton.setIcon(self.AtoBIcon)
# add button for A+B-->C reaction
self.reactABtoCButton = QPushButton(self)
self.reactABtoCButton.setObjectName("reactABtoCButton")
self.reactABtoCButton.setCheckable(True)
self.ABtoCIcon = QIcon()
self.ABtoCIcon.addPixmap(QPixmap("icons/ABtoC.png"), QIcon.Normal, QIcon.Off)
self.reactABtoCButton.setIcon(self.ABtoCIcon)
# add button for A-->B+C reaction
self.reactAtoBCButton = QPushButton(self)
self.reactAtoBCButton.setObjectName("reactAtoBCButton")
self.reactAtoBCButton.setCheckable(True)
self.AtoBCIcon = QIcon()
self.AtoBCIcon.addPixmap(QPixmap("icons/AtoBC.png"), QIcon.Normal, QIcon.Off)
self.reactAtoBCButton.setIcon(self.AtoBCIcon)
# add button for A+B-->C+D reaction
self.reactABtoCDButton = QPushButton(self)
self.reactABtoCDButton.setObjectName("reactABtoCDButton")
self.reactABtoCDButton.setCheckable(True)
self.ABtoCDIcon = QIcon()
self.ABtoCDIcon.addPixmap(QPixmap("icons/ABtoCD.png"), QIcon.Normal, QIcon.Off)
self.reactABtoCDButton.setIcon(self.ABtoCDIcon)
# add button for A-->B+C+D reaction
self.reactAtoBCDButton = QPushButton(self)
self.reactAtoBCDButton.setObjectName("reactAtoBCDButton")
self.reactAtoBCDButton.setCheckable(True)
self.AtoBCDIcon = QIcon()
self.AtoBCDIcon.addPixmap(QPixmap("icons/AtoBCD.png"), QIcon.Normal, QIcon.Off)
self.reactAtoBCDButton.setIcon(self.AtoBCDIcon)
# add button for A+B-->C+D+E reaction
self.reactABtoCDEButton = QPushButton(self)
self.reactABtoCDEButton.setObjectName("reactABtoCDEButton")
self.reactABtoCDEButton.setCheckable(True)
self.ABtoCDEIcon = QIcon()
self.ABtoCDEIcon.addPixmap(QPixmap("icons/ABtoCDE.png"), QIcon.Normal, QIcon.Off)
self.reactABtoCDEButton.setIcon(self.ABtoCDEIcon)
# add button for species
self.speciesButton = QPushButton(self)
self.speciesButton.setObjectName("speciesButton")
self.speciesButton.setCheckable(True)
self.speciesIcon = QIcon()
self.speciesIcon.addPixmap(QPixmap("icons/species.png"), QIcon.Normal, QIcon.Off)
self.speciesButton.setIcon(self.speciesIcon)
#add buttons to toolbar
self.toolBar.addWidget(self.reactAtoBButton)
self.toolBar.addWidget(self.reactABtoCButton)
self.toolBar.addWidget(self.reactAtoBCButton)
self.toolBar.addWidget(self.reactABtoCDButton)
self.toolBar.addWidget(self.reactAtoBCDButton)
self.toolBar.addWidget(self.reactABtoCDEButton)
self.toolBar.addWidget(self.speciesButton)
#set tool buttons as exclusive
self.toolGroup=QButtonGroup(self)
self.toolGroup.setExclusive(True)
self.toolGroup.addButton(self.reactAtoBButton)
self.toolGroup.addButton(self.reactABtoCButton)
self.toolGroup.addButton(self.reactAtoBCButton)
self.toolGroup.addButton(self.reactABtoCDButton)
self.toolGroup.addButton(self.reactAtoBCDButton)
self.toolGroup.addButton(self.reactABtoCDEButton)
self.toolGroup.addButton(self.speciesButton)
#initialize main app window if program is called
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
AppWindow=MainWindow()
AppWindow.show()
sys.exit(app.exec_())
| 37.494368
| 300
| 0.762367
|
092852d84578c73be83e6b2d059814fe9e9fa719
| 742
|
py
|
Python
|
fitness-backend/src/app/models/gym.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
fitness-backend/src/app/models/gym.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
fitness-backend/src/app/models/gym.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
from . import *
class Gym(Base):
__tablename__ = 'gyms'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
equipment = db.Column(db.String(1500))
image_url = db.Column(db.String(1500), default="")
is_gym = db.Column(db.Boolean, nullable=False, default=False)
location_gym_id = db.Column(
db.Integer,
db.ForeignKey('gyms.id', ondelete='CASCADE')
)
location_gym = db.relationship('Gym', remote_side=[id])
def __init__(self, **kwargs):
self.equipment = kwargs.get('equipment')
self.image_url = kwargs.get('image_url')
self.is_gym = kwargs.get('is_gym')
        self.location_gym_id = kwargs.get('location_gym_id')
self.name = kwargs.get('name')
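# Illustrative usage (not part of the original model; it assumes the package's
# `db` session object is configured and the field values below are hypothetical):
#   gym = Gym(name='Helen Newman', is_gym=True, equipment='treadmills, racks',
#             image_url='', location_gym_id=None)
#   db.session.add(gym)
#   db.session.commit()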
| 30.916667
| 63
| 0.683288
|
12921869cfc5f2001e8c5a6e6c4a2604c12ed722
| 1,008
|
py
|
Python
|
plugins/salesforce/komand_salesforce/actions/simple_search/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/salesforce/komand_salesforce/actions/simple_search/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/salesforce/komand_salesforce/actions/simple_search/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import SimpleSearchInput, SimpleSearchOutput
# Custom imports below
from komand.helper import clean
class SimpleSearch(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="simple_search",
description="Execute a simple search for a string",
input=SimpleSearchInput(),
output=SimpleSearchOutput(),
)
def run(self, params={}):
text = params.get("text")
results = self.connection.api.simple_search(text)
flat_results = []
for result in results:
flat_result = {
"type": result.get("attributes", {}).get("type", ""),
"url": result.get("attributes", {}).get("url", ""),
"name": result.get("Name"),
"id": result.get("Id"),
}
flat_result = clean(flat_result)
flat_results.append(flat_result)
return {"search_results": flat_results}
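    # Illustrative result shape (values below are hypothetical, shown only to
    # document the flattened output of run()):
    #   {"search_results": [{"type": "Account", "url": "/services/data/vXX.X/...",
    #                        "name": "Acme Corp", "id": "001xx0000000001"}]}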
| 29.647059
| 69
| 0.572421
|
468aa788e3f51cb2b1bcea1de5c1537f5d8b7b2b
| 4,116
|
py
|
Python
|
arcade/examples/platform_tutorial/04_add_gravity.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | 1
|
2020-01-18T04:48:38.000Z
|
2020-01-18T04:48:38.000Z
|
arcade/examples/platform_tutorial/04_add_gravity.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | null | null | null |
arcade/examples/platform_tutorial/04_add_gravity.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | null | null | null |
"""
Platformer Game
"""
import arcade
# Constants
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Platformer"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
# Movement speed of player, in pixels per frame
PLAYER_MOVEMENT_SPEED = 5
GRAVITY = 1
PLAYER_JUMP_SPEED = 20
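# Note on units (comment added for clarity): PLAYER_MOVEMENT_SPEED and
# PLAYER_JUMP_SPEED are in pixels per frame, and GRAVITY is applied by the
# platformer physics engine created in setup(), which reduces the player's
# change_y by this amount each update.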
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self):
# Call the parent class and set up the window
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# These are 'lists' that keep track of our sprites. Each sprite should
# go into a list.
self.coin_list = None
self.wall_list = None
self.player_list = None
# Separate variable that holds the player sprite
self.player_sprite = None
# Our physics engine
self.physics_engine = None
arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)
def setup(self):
""" Set up the game here. Call this function to restart the game. """
# Create the Sprite lists
self.player_list = arcade.SpriteList()
self.wall_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# Set up the player, specifically placing it at these coordinates.
image_source = ":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png"
self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)
self.player_sprite.center_x = 64
self.player_sprite.center_y = 128
self.player_list.append(self.player_sprite)
# Create the ground
# This shows using a loop to place multiple sprites horizontally
for x in range(0, 1250, 64):
wall = arcade.Sprite(":resources:images/tiles/grassMid.png", TILE_SCALING)
wall.center_x = x
wall.center_y = 32
self.wall_list.append(wall)
# Put some crates on the ground
# This shows using a coordinate list to place sprites
coordinate_list = [[512, 96],
[256, 96],
[768, 96]]
for coordinate in coordinate_list:
# Add a crate on the ground
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", TILE_SCALING)
wall.position = coordinate
self.wall_list.append(wall)
# Create the 'physics engine'
self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,
self.wall_list,
GRAVITY)
def on_draw(self):
""" Render the screen. """
# Clear the screen to the background color
arcade.start_render()
# Draw our sprites
self.wall_list.draw()
self.coin_list.draw()
self.player_list.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.UP or key == arcade.key.W:
if self.physics_engine.can_jump():
self.player_sprite.change_y = PLAYER_JUMP_SPEED
elif key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = 0
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
# Move the player with the physics engine
self.physics_engine.update()
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 31.419847
| 106
| 0.615403
|
882dac9e23f0eae9625a8a91da0b41ddb3064ce2
| 2,471
|
py
|
Python
|
pyci/tests/utils.py
|
iliapolo/pyrelease
|
85784c556a0760d560378ef6edcfb32ab87048a5
|
[
"Apache-2.0"
] | 5
|
2018-05-03T15:20:12.000Z
|
2019-12-13T20:19:47.000Z
|
pyci/tests/utils.py
|
iliapolo/pyci
|
85784c556a0760d560378ef6edcfb32ab87048a5
|
[
"Apache-2.0"
] | 54
|
2018-04-09T06:34:50.000Z
|
2020-03-30T06:13:39.000Z
|
pyci/tests/utils.py
|
iliapolo/pyrelease
|
85784c556a0760d560378ef6edcfb32ab87048a5
|
[
"Apache-2.0"
] | null | null | null |
#############################################################################
# Copyright (c) 2018 Eli Polonsky. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
import shutil
import os
import time
# This whole bit is just so test will import magic mock
# in a unified way from this file, without needing to duplicate this logic.
try:
# python2
# pylint: disable=unused-import
from mock import MagicMock
except ImportError:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
# pylint: disable=unused-import
from unittest.mock import MagicMock
from pyci.api.utils import generate_setup_py
def create_release(gh, request, sha, name=None, draft=False):
release_name = name or request.node.name
return gh.repo.create_git_release(
tag=release_name,
target_commitish=sha,
name=release_name,
message='',
draft=draft
)
def patch_setup_py(local_repo_path):
with open(os.path.join(local_repo_path, 'setup.py'), 'r') as stream:
setup_py = stream.read()
version = int(round(time.time() * 1000))
setup_py = generate_setup_py(setup_py, '{}'.format(version))
with open(os.path.join(local_repo_path, 'setup.py'), 'w') as stream:
stream.write(setup_py)
return version
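# Note (comment added for clarity): the version written above is the current time
# in milliseconds, which keeps repeated test runs from colliding on the same
# package version.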
def copy_repo(dst):
import pyci
source_path = os.path.abspath(os.path.join(pyci.__file__, os.pardir, os.pardir))
def _copyfile(path):
shutil.copyfile(path, os.path.join(dst, os.path.basename(path)))
code = os.path.join(source_path, 'pyci')
setup_py = os.path.join(source_path, 'setup.py')
spec = os.path.join(source_path, 'pyci.spec')
license_path = os.path.join(source_path, 'LICENSE')
shutil.copytree(code, os.path.join(dst, os.path.basename(code)))
_copyfile(setup_py)
_copyfile(spec)
_copyfile(license_path)
| 30.134146
| 84
| 0.66208
|
380e150f7cc43502bb88f0379d24545a960fb87a
| 14,264
|
py
|
Python
|
.venv/lib/python2.7/site-packages/celery/contrib/migrate.py
|
MansoorHanif/FYP-web-app
|
918008d3b5eedaa904f3e720296afde9d73ac3f4
|
[
"BSD-3-Clause"
] | 4
|
2018-10-19T04:36:20.000Z
|
2020-02-13T16:14:09.000Z
|
.venv/lib/python2.7/site-packages/celery/contrib/migrate.py
|
MansoorHanif/FYP-web-app
|
918008d3b5eedaa904f3e720296afde9d73ac3f4
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T23:03:45.000Z
|
2021-06-10T18:05:11.000Z
|
oo/lib/python3.5/site-packages/celery/contrib/migrate.py
|
chunky2808/SPOJ-history-Django-App
|
490c58b1593cd3626f0ddc27fdd09c6e8d1c56e1
|
[
"MIT"
] | 1
|
2019-10-26T04:20:52.000Z
|
2019-10-26T04:20:52.000Z
|
# -*- coding: utf-8 -*-
"""Message migration tools (Broker <-> Broker)."""
from __future__ import absolute_import, print_function, unicode_literals
import socket
from functools import partial
from itertools import cycle, islice
from kombu import eventloop, Queue
from kombu.common import maybe_declare
from kombu.utils.encoding import ensure_bytes
from celery.app import app_or_default
from celery.five import python_2_unicode_compatible, string, string_t
from celery.utils.nodenames import worker_direct
from celery.utils.text import str_to_list
__all__ = [
'StopFiltering', 'State', 'republish', 'migrate_task',
'migrate_tasks', 'move', 'task_id_eq', 'task_id_in',
'start_filter', 'move_task_by_id', 'move_by_idmap',
'move_by_taskmap', 'move_direct', 'move_direct_by_id',
]
MOVING_PROGRESS_FMT = """\
Moving task {state.filtered}/{state.strtotal}: \
{body[task]}[{body[id]}]\
"""
class StopFiltering(Exception):
"""Semi-predicate used to signal filter stop."""
@python_2_unicode_compatible
class State(object):
"""Migration progress state."""
count = 0
filtered = 0
total_apx = 0
@property
def strtotal(self):
if not self.total_apx:
return '?'
return string(self.total_apx)
def __repr__(self):
if self.filtered:
return '^{0.filtered}'.format(self)
return '{0.count}/{0.strtotal}'.format(self)
def republish(producer, message, exchange=None, routing_key=None,
remove_props=['application_headers',
'content_type',
'content_encoding',
'headers']):
"""Republish message."""
body = ensure_bytes(message.body) # use raw message body.
info, headers, props = (message.delivery_info,
message.headers, message.properties)
exchange = info['exchange'] if exchange is None else exchange
routing_key = info['routing_key'] if routing_key is None else routing_key
ctype, enc = message.content_type, message.content_encoding
# remove compression header, as this will be inserted again
# when the message is recompressed.
compression = headers.pop('compression', None)
for key in remove_props:
props.pop(key, None)
producer.publish(ensure_bytes(body), exchange=exchange,
routing_key=routing_key, compression=compression,
headers=headers, content_type=ctype,
content_encoding=enc, **props)
def migrate_task(producer, body_, message, queues=None):
"""Migrate single task message."""
info = message.delivery_info
queues = {} if queues is None else queues
republish(producer, message,
exchange=queues.get(info['exchange']),
routing_key=queues.get(info['routing_key']))
def filter_callback(callback, tasks):
def filtered(body, message):
if tasks and body['task'] not in tasks:
return
return callback(body, message)
return filtered
def migrate_tasks(source, dest, migrate=migrate_task, app=None,
queues=None, **kwargs):
"""Migrate tasks from one broker to another."""
app = app_or_default(app)
queues = prepare_queues(queues)
producer = app.amqp.Producer(dest, auto_declare=False)
migrate = partial(migrate, producer, queues=queues)
def on_declare_queue(queue):
new_queue = queue(producer.channel)
new_queue.name = queues.get(queue.name, queue.name)
if new_queue.routing_key == queue.name:
new_queue.routing_key = queues.get(queue.name,
new_queue.routing_key)
if new_queue.exchange.name == queue.name:
new_queue.exchange.name = queues.get(queue.name, queue.name)
new_queue.declare()
return start_filter(app, source, migrate, queues=queues,
on_declare_queue=on_declare_queue, **kwargs)
def _maybe_queue(app, q):
if isinstance(q, string_t):
return app.amqp.queues[q]
return q
def move(predicate, connection=None, exchange=None, routing_key=None,
source=None, app=None, callback=None, limit=None, transform=None,
**kwargs):
"""Find tasks by filtering them and move the tasks to a new queue.
Arguments:
predicate (Callable): Filter function used to decide the messages
to move. Must accept the standard signature of ``(body, message)``
used by Kombu consumer callbacks. If the predicate wants the
message to be moved it must return either:
1) a tuple of ``(exchange, routing_key)``, or
2) a :class:`~kombu.entity.Queue` instance, or
3) any other true value means the specified
``exchange`` and ``routing_key`` arguments will be used.
connection (kombu.Connection): Custom connection to use.
source: List[Union[str, kombu.Queue]]: Optional list of source
queues to use instead of the default (queues
in :setting:`task_queues`). This list can also contain
:class:`~kombu.entity.Queue` instances.
exchange (str, kombu.Exchange): Default destination exchange.
routing_key (str): Default destination routing key.
limit (int): Limit number of messages to filter.
callback (Callable): Callback called after message moved,
with signature ``(state, body, message)``.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
Also supports the same keyword arguments as :func:`start_filter`.
To demonstrate, the :func:`move_task_by_id` operation can be implemented
like this:
.. code-block:: python
def is_wanted_task(body, message):
if body['id'] == wanted_id:
return Queue('foo', exchange=Exchange('foo'),
routing_key='foo')
move(is_wanted_task)
or with a transform:
.. code-block:: python
def transform(value):
if isinstance(value, string_t):
return Queue(value, Exchange(value), value)
return value
move(is_wanted_task, transform=transform)
Note:
The predicate may also return a tuple of ``(exchange, routing_key)``
to specify the destination to where the task should be moved,
or a :class:`~kombu.entitiy.Queue` instance.
Any other true value means that the task will be moved to the
default exchange/routing_key.
"""
app = app_or_default(app)
queues = [_maybe_queue(app, queue) for queue in source or []] or None
with app.connection_or_acquire(connection, pool=False) as conn:
producer = app.amqp.Producer(conn)
state = State()
def on_task(body, message):
ret = predicate(body, message)
if ret:
if transform:
ret = transform(ret)
if isinstance(ret, Queue):
maybe_declare(ret, conn.default_channel)
ex, rk = ret.exchange.name, ret.routing_key
else:
ex, rk = expand_dest(ret, exchange, routing_key)
republish(producer, message,
exchange=ex, routing_key=rk)
message.ack()
state.filtered += 1
if callback:
callback(state, body, message)
if limit and state.filtered >= limit:
raise StopFiltering()
return start_filter(app, conn, on_task, consume_from=queues, **kwargs)
def expand_dest(ret, exchange, routing_key):
try:
ex, rk = ret
except (TypeError, ValueError):
ex, rk = exchange, routing_key
return ex, rk
def task_id_eq(task_id, body, message):
    """Return true if the task id equals task_id."""
    return body['id'] == task_id
def task_id_in(ids, body, message):
    """Return true if the task id is a member of the set ids."""
    return body['id'] in ids
def prepare_queues(queues):
if isinstance(queues, string_t):
queues = queues.split(',')
if isinstance(queues, list):
queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
for q in queues)
if queues is None:
queues = {}
return queues
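# Illustrative behaviour of prepare_queues (example input/output, not from the
# original module):
#   prepare_queues('default:new_default,media')
#   -> {'default': 'new_default', 'media': 'media'}
#   prepare_queues(['default:new_default', 'media'])
#   -> {'default': 'new_default', 'media': 'media'}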
class Filterer(object):
def __init__(self, app, conn, filter,
limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
self.app = app
self.conn = conn
self.filter = filter
self.limit = limit
self.timeout = timeout
self.ack_messages = ack_messages
self.tasks = set(str_to_list(tasks) or [])
self.queues = prepare_queues(queues)
self.callback = callback
self.forever = forever
self.on_declare_queue = on_declare_queue
self.consume_from = [
_maybe_queue(self.app, q)
for q in consume_from or list(self.queues)
]
self.state = state or State()
self.accept = accept
def start(self):
# start migrating messages.
with self.prepare_consumer(self.create_consumer()):
try:
for _ in eventloop(self.conn, # pragma: no cover
timeout=self.timeout,
ignore_timeouts=self.forever):
pass
except socket.timeout:
pass
except StopFiltering:
pass
return self.state
def update_state(self, body, message):
self.state.count += 1
if self.limit and self.state.count >= self.limit:
raise StopFiltering()
def ack_message(self, body, message):
message.ack()
def create_consumer(self):
return self.app.amqp.TaskConsumer(
self.conn,
queues=self.consume_from,
accept=self.accept,
)
def prepare_consumer(self, consumer):
filter = self.filter
update_state = self.update_state
ack_message = self.ack_message
if self.tasks:
filter = filter_callback(filter, self.tasks)
update_state = filter_callback(update_state, self.tasks)
ack_message = filter_callback(ack_message, self.tasks)
consumer.register_callback(filter)
consumer.register_callback(update_state)
if self.ack_messages:
            consumer.register_callback(ack_message)  # use the (possibly task-filtered) callback prepared above
if self.callback is not None:
callback = partial(self.callback, self.state)
if self.tasks:
callback = filter_callback(callback, self.tasks)
consumer.register_callback(callback)
self.declare_queues(consumer)
return consumer
def declare_queues(self, consumer):
# declare all queues on the new broker.
for queue in consumer.queues:
if self.queues and queue.name not in self.queues:
continue
if self.on_declare_queue is not None:
self.on_declare_queue(queue)
try:
_, mcount, _ = queue(
consumer.channel).queue_declare(passive=True)
if mcount:
self.state.total_apx += mcount
except self.conn.channel_errors:
pass
def start_filter(app, conn, filter, limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
"""Filter tasks."""
return Filterer(
app, conn, filter,
limit=limit,
timeout=timeout,
ack_messages=ack_messages,
tasks=tasks,
queues=queues,
callback=callback,
forever=forever,
on_declare_queue=on_declare_queue,
consume_from=consume_from,
state=state,
accept=accept,
**kwargs).start()
def move_task_by_id(task_id, dest, **kwargs):
"""Find a task by id and move it to another queue.
Arguments:
task_id (str): Id of task to find and move.
dest: (str, kombu.Queue): Destination queue.
**kwargs (Any): Also supports the same keyword
arguments as :func:`move`.
"""
return move_by_idmap({task_id: dest}, **kwargs)
def move_by_idmap(map, **kwargs):
"""Move tasks by matching from a ``task_id: queue`` mapping.
Where ``queue`` is a queue to move the task to.
Example:
>>> move_by_idmap({
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
... queues=['hipri'])
"""
def task_id_in_map(body, message):
return map.get(body['id'])
# adding the limit means that we don't have to consume any more
# when we've found everything.
return move(task_id_in_map, limit=len(map), **kwargs)
def move_by_taskmap(map, **kwargs):
"""Move tasks by matching from a ``task_name: queue`` mapping.
``queue`` is the queue to move the task to.
Example:
>>> move_by_taskmap({
... 'tasks.add': Queue('name'),
... 'tasks.mul': Queue('name'),
... })
"""
def task_name_in_map(body, message):
return map.get(body['task']) # <- name of task
return move(task_name_in_map, **kwargs)
def filter_status(state, body, message, **kwargs):
print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))
move_direct = partial(move, transform=worker_direct)
move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
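# Note (comment added for clarity): the move_direct* helpers above are the same
# operations with transform=worker_direct, i.e. the destination returned by the
# predicate or mapping is converted into the corresponding worker-direct queue.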
| 34.288462
| 79
| 0.614344
|
819eaaa4421808baa54812a71fdb7aadd449765b | 10,951 | py | Python | stickytape/__init__.py | wenoptics/stickytape | 12f5e64d97be70e9f58c068ce6ca429d0cfba97e | ["BSD-2-Clause"] | null | null | null | stickytape/__init__.py | wenoptics/stickytape | 12f5e64d97be70e9f58c068ce6ca429d0cfba97e | ["BSD-2-Clause"] | null | null | null | stickytape/__init__.py | wenoptics/stickytape | 12f5e64d97be70e9f58c068ce6ca429d0cfba97e | ["BSD-2-Clause"] | null | null | null |
import os.path
import codecs
import subprocess
import ast
import sys
def script(path, add_python_modules=None, add_python_paths=None, python_binary=None):
if add_python_modules is None:
add_python_modules = []
if add_python_paths is None:
add_python_paths = []
python_paths = [os.path.dirname(path)] + add_python_paths + _read_sys_path_from_python_bin(python_binary)
output = []
output.append(_prelude())
output.append(_generate_module_writers(
path,
sys_path=python_paths,
add_python_modules=add_python_modules,
))
output.append(_indent(open(path).read()))
return "".join(output)
def _read_sys_path_from_python_bin(binary_path):
if binary_path is None:
return []
else:
output = subprocess.check_output(
[binary_path, "-E", "-c", "import sys;\nfor path in sys.path: print(path)"],
)
return [
# TODO: handle non-UTF-8 encodings
line.strip().decode("utf-8")
for line in output.split(b"\n")
if line.strip()
]
def _indent(string):
return " " + string.replace("\n", "\n ")
def _prelude():
prelude_path = os.path.join(os.path.dirname(__file__), "prelude.py")
with open(prelude_path) as prelude_file:
return prelude_file.read()
def _generate_module_writers(path, sys_path, add_python_modules):
generator = ModuleWriterGenerator(sys_path)
generator.generate_for_file(path, add_python_modules=add_python_modules)
return generator.build()
class ModuleWriterGenerator(object):
def __init__(self, sys_path):
self._sys_path = sys_path
self._modules = {}
def build(self):
output = []
for module_path, module_source in _iteritems(self._modules):
output.append(" __stickytape_write_module({0}, {1})\n".format(
_string_escape(module_path),
_string_escape(module_source)
))
return "".join(output)
def generate_for_file(self, python_file_path, add_python_modules):
self._generate_for_module(ImportTarget(python_file_path, "."))
for add_python_module in add_python_modules:
import_line = ImportLine(import_path=add_python_module, items=[])
self._generate_for_import(python_module=None, import_line=import_line)
def _generate_for_module(self, python_module):
import_lines = _find_imports_in_file(python_module.absolute_path)
for import_line in import_lines:
if not _is_stdlib_import(import_line):
self._generate_for_import(python_module, import_line)
def _generate_for_import(self, python_module, import_line):
import_targets = self._read_possible_import_targets(python_module, import_line)
for import_target in import_targets:
if import_target.module_path not in self._modules:
self._modules[import_target.module_path] = import_target.read()
self._generate_for_module(import_target)
def _read_possible_import_targets(self, python_module, import_line):
import_path_parts = import_line.import_path.split("/")
possible_init_module_paths = [
os.path.join(os.path.join(*import_path_parts[0:index + 1]), "__init__.py")
for index in range(len(import_path_parts))
]
possible_module_paths = [import_line.import_path + ".py"] + possible_init_module_paths
for item in import_line.items:
possible_module_paths += [
os.path.join(import_line.import_path, item + ".py"),
os.path.join(import_line.import_path, item, "__init__.py")
]
import_targets = [
self._find_module(python_module, module_path)
for module_path in possible_module_paths
]
valid_import_targets = [target for target in import_targets if target is not None]
return valid_import_targets
# TODO: allow the user some choice in what happens in this case?
# Detection of try/except blocks is possibly over-complicating things
#~ if len(valid_import_targets) > 0:
#~ return valid_import_targets
#~ else:
#~ raise RuntimeError("Could not find module: " + import_line.import_path)
def _find_module(self, importing_python_module, module_path):
if importing_python_module is not None:
relative_module_path = os.path.join(os.path.dirname(importing_python_module.absolute_path), module_path)
if os.path.exists(relative_module_path):
return ImportTarget(relative_module_path, os.path.join(os.path.dirname(importing_python_module.module_path), module_path))
for sys_path in self._sys_path:
full_module_path = os.path.join(sys_path, module_path)
if os.path.exists(full_module_path):
return ImportTarget(full_module_path, module_path)
return None
def _find_imports_in_file(file_path):
source = _read_file(file_path)
parse_tree = ast.parse(source, file_path)
for node in ast.walk(parse_tree):
if isinstance(node, ast.Import):
for name in node.names:
yield ImportLine(name.name, [])
if isinstance(node, ast.ImportFrom):
if node.module is None:
module = "."
else:
module = node.module
yield ImportLine(module, [name.name for name in node.names])
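# Illustrative yields from the walker above (dots become slashes in ImportLine):
#     "import os.path"       -> ImportLine("os/path", [])
#     "from foo import bar"  -> ImportLine("foo", ["bar"])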
def _resolve_package_to_import_path(package):
import_path = package.replace(".", "/")
if import_path.startswith("/"):
return "." + import_path
else:
return import_path
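# e.g. "xml.sax.handler" -> "xml/sax/handler"; a relative import such as
# ".foo" resolves to "./foo" (and "." alone to "./").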
def _read_file(path):
with open(path) as file:
return file.read()
def _is_stdlib_import(import_line):
return import_line.import_path in _stdlib_modules
class ImportTarget(object):
def __init__(self, absolute_path, module_path):
self.absolute_path = absolute_path
self.module_path = os.path.normpath(module_path)
def read(self):
return _read_file(self.absolute_path)
class ImportLine(object):
def __init__(self, import_path, items):
self.import_path = _resolve_package_to_import_path(import_path)
self.items = items
_stdlib_modules = set([
"string",
"re",
"struct",
"difflib",
"StringIO",
"cStringIO",
"textwrap",
"codecs",
"unicodedata",
"stringprep",
"fpformat",
"datetime",
"calendar",
"collections",
"heapq",
"bisect",
"array",
"sets",
"sched",
"mutex",
"Queue",
"weakref",
"UserDict",
"UserList",
"UserString",
"types",
"new",
"copy",
"pprint",
"repr",
"numbers",
"math",
"cmath",
"decimal",
"fractions",
"random",
"itertools",
"functools",
"operator",
"os/path",
"fileinput",
"stat",
"statvfs",
"filecmp",
"tempfile",
"glob",
"fnmatch",
"linecache",
"shutil",
"dircache",
"macpath",
"pickle",
"cPickle",
"copy_reg",
"shelve",
"marshal",
"anydbm",
"whichdb",
"dbm",
"gdbm",
"dbhash",
"bsddb",
"dumbdbm",
"sqlite3",
"zlib",
"gzip",
"bz2",
"zipfile",
"tarfile",
"csv",
"ConfigParser",
"robotparser",
"netrc",
"xdrlib",
"plistlib",
"hashlib",
"hmac",
"md5",
"sha",
"os",
"io",
"time",
"argparse",
"optparse",
"getopt",
"logging",
"logging/config",
"logging/handlers",
"getpass",
"curses",
"curses/textpad",
"curses/ascii",
"curses/panel",
"platform",
"errno",
"ctypes",
"select",
"threading",
"thread",
"dummy_threading",
"dummy_thread",
"multiprocessing",
"mmap",
"readline",
"rlcompleter",
"subprocess",
"socket",
"ssl",
"signal",
"popen2",
"asyncore",
"asynchat",
"email",
"json",
"mailcap",
"mailbox",
"mhlib",
"mimetools",
"mimetypes",
"MimeWriter",
"mimify",
"multifile",
"rfc822",
"base64",
"binhex",
"binascii",
"quopri",
"uu",
"HTMLParser",
"sgmllib",
"htmllib",
"htmlentitydefs",
"xml/etree/ElementTree",
"xml/dom",
"xml/dom/minidom",
"xml/dom/pulldom",
"xml/sax",
"xml/sax/handler",
"xml/sax/saxutils",
"xml/sax/xmlreader",
"xml/parsers/expat",
"webbrowser",
"cgi",
"cgitb",
"wsgiref",
"urllib",
"urllib2",
"httplib",
"ftplib",
"poplib",
"imaplib",
"nntplib",
"smtplib",
"smtpd",
"telnetlib",
"uuid",
"urlparse",
"SocketServer",
"BaseHTTPServer",
"SimpleHTTPServer",
"CGIHTTPServer",
"cookielib",
"Cookie",
"xmlrpclib",
"SimpleXMLRPCServer",
"DocXMLRPCServer",
"audioop",
"imageop",
"aifc",
"sunau",
"wave",
"chunk",
"colorsys",
"imghdr",
"sndhdr",
"ossaudiodev",
"gettext",
"locale",
"cmd",
"shlex",
"Tkinter",
"ttk",
"Tix",
"ScrolledText",
"turtle",
"pydoc",
"doctest",
"unittest",
"test",
"test/test_support",
"bdb",
"pdb",
"hotshot",
"timeit",
"trace",
"sys",
"sysconfig",
"__builtin__",
"future_builtins",
"__main__",
"warnings",
"contextlib",
"abc",
"atexit",
"traceback",
"__future__",
"gc",
"inspect",
"site",
"user",
"fpectl",
"distutils",
"code",
"codeop",
"rexec",
"Bastion",
"imp",
"importlib",
"imputil",
"zipimport",
"pkgutil",
"modulefinder",
"runpy",
"parser",
"ast",
"symtable",
"symbol",
"token",
"keyword",
"tokenize",
"tabnanny",
"pyclbr",
"py_compile",
"compileall",
"dis",
"pickletools",
"formatter",
"msilib",
"msvcrt",
"_winreg",
"winsound",
"posix",
"pwd",
"spwd",
"grp",
"crypt",
"dl",
"termios",
"tty",
"pty",
"fcntl",
"pipes",
"posixfile",
"resource",
"nis",
"syslog",
"commands",
"ic",
"MacOS",
"macostools",
"findertools",
"EasyDialogs",
"FrameWork",
"autoGIL",
"ColorPicker",
"gensuitemodule",
"aetools",
"aepack",
"aetypes",
"MiniAEFrame",
"al",
"AL",
"cd",
"fl",
"FL",
"flp",
"fm",
"gl",
"DEVICE",
"GL",
"imgfile",
"jpeg",
"sunaudiodev",
"SUNAUDIODEV",
])
if sys.version_info[0] == 2:
_iteritems = lambda x: x.iteritems()
def _string_escape(string):
return "'''{0}'''".format(codecs.getencoder("string_escape")(string)[0].decode("ascii"))
else:
_iteritems = lambda x: x.items()
_string_escape = repr
| 21.685149 | 138 | 0.586065 |