# ----------------------------------------------------------------------------
# File: Packs/HealthCheck/Scripts/HealthCheckExportSummary/HealthCheckExportSummary.py
# Repo: mazmat-panw/content (MIT)
# ----------------------------------------------------------------------------
import demistomock as demisto  # noqa: F401
from CommonServerPython import * # noqa: F401
import json
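
# Collect the current incident, its context and the output of each
# health-check widget into a single dict that is exported as a file result.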
ctx = demisto.context()
incident = demisto.incidents()[0]
main = {
'incident': incident,
'ctx': ctx,
'widgets': {}
}
# Save data from widgets
# Disk Usage Percentage
res = demisto.executeCommand("HealthCheckDiskUsage", {})[0]['Contents']
json.loads(res).get('stats')
main['widgets']['DiskUsagePerCentage'] = json.loads(res).get('stats')
# Disk Usage Line
res = demisto.executeCommand("HealthCheckDiskUsageLine", {})[0]['Contents']
json.loads(res).get('stats')
main['widgets']['DiskUsagePerLine'] = json.loads(res).get('stats')
# Memory Usage
res = demisto.executeCommand("HealthCheckMemory", {})[0]['Contents']
json.loads(res).get('stats')
main['widgets']['MemoryUsage'] = json.loads(res).get('stats')
# CPU Usage
res = demisto.executeCommand("HealthCheckCPU", {})[0]['Contents']
main['widgets']['CPUUsage'] = json.loads(res).get('stats')
# HealthCheckIncidentsCreatedMonthly
res = demisto.executeCommand("HealthCheckIncidentsCreatedMonthly", {})[0]['Contents']
main['widgets']['IncidentsCreatedMonthly'] = json.loads(res).get('stats')
# HealthCheckIncidentsCreatedWeekly
res = demisto.executeCommand("HealthCheckIncidentsCreatedWeekly", {})[0]['Contents']
main['widgets']['IncidentsCreatedWeekly'] = json.loads(res).get('stats')
# HealthCheckIncidentsCreatedDaily
res = demisto.executeCommand("HealthCheckIncidentsCreatedDaily", {})[0]['Contents']
main['widgets']['IncidentsCreatedDaily'] = json.loads(res).get('stats')
variables = json.dumps(main).encode('utf-8')
demisto.results(fileResult('HealthCheckDataExport.txt', variables))

# ----------------------------------------------------------------------------
# File: cride/rides/permissions/rides.py
# Repo: jesusRL96/curso_platzi_django_adv (MIT)
# ----------------------------------------------------------------------------
from rest_framework.permissions import BasePermission
class IsRideOwner(BasePermission):
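    """Grants access only to the user who offered the ride."""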
def has_object_permission(self, request, view, obj):
return request.user == obj.offered_by
class IsNotRideOwner(BasePermission):
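    """Grants access to any user except the one who offered the ride."""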
def has_object_permission(self, request, view, obj):
        return request.user != obj.offered_by

# ----------------------------------------------------------------------------
# File: labgraph/loggers/hdf5/tests/test_logger.py
# Repo: Yunusbcr/labgraph (MIT)
# ----------------------------------------------------------------------------
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import sys
import h5py
from ....messages.types import (
DynamicType,
StrDynamicType,
StrType,
)
from ..logger import HDF5Logger, SERIALIZABLE_DYNAMIC_TYPES
from .test_utils import LOGGING_IDS, write_logs_to_hdf5
def test_hdf5_logger() -> None:
"""
Tests that we can write messages to an HDF5 file and then read them back.
"""
if sys.version_info > (3, 8):
str_types = (StrType, StrDynamicType)
else:
str_types = (StrType,)
# Write the messages to a file
output_path, logging_ids_and_messages = write_logs_to_hdf5(HDF5Logger)
# Read the messages back from the file and compare to the messages array
with h5py.File(str(output_path), "r") as h5py_file:
for logging_id in LOGGING_IDS:
messages = [l[1] for l in logging_ids_and_messages if l[0] == logging_id]
for i, message in enumerate(messages):
for field in message.__class__.__message_fields__.values():
expected_value = getattr(message, field.name)
actual_value = h5py_file[logging_id][i][field.name]
if isinstance(field.data_type, str_types):
assert (
actual_value.decode(field.data_type.encoding)
== expected_value
)
elif isinstance(field.data_type, SERIALIZABLE_DYNAMIC_TYPES):
actual_value = field.data_type.postprocess(bytes(actual_value))
assert actual_value == expected_value
elif isinstance(field.data_type, DynamicType) and not isinstance(
field.data_type, StrDynamicType
):
assert bytes(actual_value) == expected_value
else:
assert actual_value == expected_value

# ----------------------------------------------------------------------------
# File: Exercicios/ex030.py
# Repo: rbpope/AulasPythonMundo1 (MIT)
# ----------------------------------------------------------------------------
n: int = int(input('Escreva um número na tela: '))
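# Report whether the number is even ("par") or odd ("ímpar").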
if n % 2 == 0:
print('Esse número é par!')
else:
print('Esse número é impar!')

# ----------------------------------------------------------------------------
# File: gdal/swig/python/samples/validate_gpkg.py
# Repo: joa-quim/gdal (MIT)
# ----------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR
# Purpose: Test compliance of GeoPackage database w.r.t GeoPackage spec
# Author: Even Rouault <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2017, Even Rouault <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import datetime
import os
import sqlite3
import struct
import sys
# GDAL may be used for checks on tile content for the tiled gridded extension.
# If not available, those tests will be skipped
try:
from osgeo import gdal
has_gdal = True
except ImportError:
has_gdal = False
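
# SQL escaping helpers: single quotes are doubled inside literals and
# double quotes inside identifiers, following SQLite quoting rules.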
def _esc_literal(literal):
return literal.replace("'", "''")
def _esc_id(identifier):
return '"' + identifier.replace('"', "\"\"") + '"'
def _is_valid_data_type(type):
return type in ('BOOLEAN', 'TINYINT', 'SMALLINT', 'MEDIUMINT',
'INT', 'INTEGER', 'FLOAT', 'DOUBLE', 'REAL',
'TEXT', 'BLOB', 'DATE', 'DATETIME') or \
type.startswith('TEXT(') or type.startswith('BLOB(')
class GPKGCheckException(Exception):
pass
class GPKGChecker(object):
EXT_GEOM_TYPES = ('CIRCULARSTRING', 'COMPOUNDCURVE', 'CURVEPOLYGON',
'MULTICURVE', 'MULTISURFACE', 'CURVE', 'SURFACE')
def __init__(self, filename, abort_at_first_error=True, verbose=False):
self.filename = filename
self.extended_pragma_info = False
self.abort_at_first_error = abort_at_first_error
self.verbose = verbose
self.errors = []
def _log(self, msg):
if self.verbose:
print(msg)
def _assert(self, cond, req, msg):
# self._log('Verified requirement %s' % req)
if not cond:
self.errors += [(req, msg)]
if self.abort_at_first_error:
if req:
raise GPKGCheckException('Req %s: %s' % (str(req), msg))
else:
raise GPKGCheckException(msg)
return cond
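
    # Compare the output of "PRAGMA table_info" on a table against the
    # expected (cid, name, type, notnull, default_value, pk) tuples.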
def _check_structure(self, columns, expected_columns, req, table_name):
self._assert(len(columns) == len(expected_columns), req,
'Table %s has %d columns, whereas %d are expected' %
(table_name, len(columns), len(expected_columns)))
for (_, expected_name, expected_type, expected_notnull,
expected_default, expected_pk) in expected_columns:
found = False
for (_, name, type, notnull, default, pk) in columns:
if name != expected_name:
continue
if expected_type == 'INTEGER' and expected_pk:
expected_notnull = 1
if type == 'INTEGER' and pk:
notnull = 1
if not self.extended_pragma_info and expected_pk > 1:
expected_pk = 1
self._assert(type == expected_type, req,
'Wrong type for %s of %s. Expected %s, got %s' %
(name, table_name, expected_type, type))
self._assert(notnull == expected_notnull, req,
('Wrong notnull for %s of %s. ' +
'Expected %s, got %s') %
(name, table_name, expected_notnull, notnull))
self._assert(default == expected_default, req,
('Wrong default for %s of %s. ' +
'Expected %s, got %s') %
(name, table_name, expected_default, default))
self._assert(pk == expected_pk, req,
'Wrong pk for %s of %s. Expected %s, got %s' %
(name, table_name, expected_pk, pk))
found = True
break
self._assert(found, req, 'Column %s of %s not found!' %
(expected_name, table_name))
def _check_gpkg_spatial_ref_sys(self, c):
self._log('Checking gpkg_spatial_ref_sys')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_spatial_ref_sys'")
if not self._assert(c.fetchone() is not None, 10,
"gpkg_spatial_ref_sys table missing"):
return
c.execute("PRAGMA table_info(gpkg_spatial_ref_sys)")
columns = c.fetchall()
has_definition_12_063 = False
for (_, name, _, _, _, _) in columns:
if name == 'definition_12_063':
has_definition_12_063 = True
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
row = None
if c.fetchone() is not None:
c.execute("SELECT scope FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_crs_wkt'")
row = c.fetchone()
if row:
scope, = row
self._assert(scope == 'read-write', 145,
'scope of gpkg_crs_wkt extension should be read-write')
self._assert(
has_definition_12_063, 145,
"gpkg_spatial_ref_sys should have a definition_12_063 column, "
"as gpkg_crs_wkt extension is declared")
else:
self._assert(
not has_definition_12_063, 145,
"gpkg_extensions should declare gpkg_crs_wkt extension "
"as gpkg_spatial_ref_sys has a definition_12_063 column")
if has_definition_12_063:
expected_columns = [
(0, 'srs_name', 'TEXT', 1, None, 0),
(1, 'srs_id', 'INTEGER', 1, None, 1),
(2, 'organization', 'TEXT', 1, None, 0),
(3, 'organization_coordsys_id', 'INTEGER', 1, None, 0),
(4, 'definition', 'TEXT', 1, None, 0),
(5, 'description', 'TEXT', 0, None, 0),
(6, 'definition_12_063', 'TEXT', 1, None, 0)
]
else:
expected_columns = [
(0, 'srs_name', 'TEXT', 1, None, 0),
(1, 'srs_id', 'INTEGER', 1, None, 1),
(2, 'organization', 'TEXT', 1, None, 0),
(3, 'organization_coordsys_id', 'INTEGER', 1, None, 0),
(4, 'definition', 'TEXT', 1, None, 0),
(5, 'description', 'TEXT', 0, None, 0)
]
self._check_structure(columns, expected_columns, 10,
'gpkg_spatial_ref_sys')
if has_definition_12_063:
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition, definition_12_063 "
"FROM gpkg_spatial_ref_sys "
"WHERE srs_id IN (-1, 0, 4326) ORDER BY srs_id")
else:
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition FROM gpkg_spatial_ref_sys "
"WHERE srs_id IN (-1, 0, 4326) ORDER BY srs_id")
ret = c.fetchall()
self._assert(len(ret) == 3, 11,
'There should be at least 3 records in '
'gpkg_spatial_ref_sys')
if len(ret) != 3:
return
self._assert(ret[0][1] == 'NONE', 11,
'wrong value for organization for srs_id = -1: %s' %
ret[0][1])
self._assert(ret[0][2] == -1, 11,
'wrong value for organization_coordsys_id for '
'srs_id = -1: %s' % ret[0][2])
self._assert(ret[0][3] == 'undefined', 11,
'wrong value for definition for srs_id = -1: %s' %
ret[0][3])
if has_definition_12_063:
self._assert(ret[0][4] == 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = -1: %s' % ret[0][4])
self._assert(ret[1][1] == 'NONE', 11,
'wrong value for organization for srs_id = 0: %s' %
ret[1][1])
self._assert(ret[1][2] == 0, 11,
'wrong value for organization_coordsys_id for '
'srs_id = 0: %s' % ret[1][2])
self._assert(ret[1][3] == 'undefined', 11,
'wrong value for definition for srs_id = 0: %s' %
ret[1][3])
if has_definition_12_063:
self._assert(ret[1][4] == 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = 0: %s' % ret[1][4])
self._assert(ret[2][1].lower() == 'epsg', 11,
'wrong value for organization for srs_id = 4326: %s' %
ret[2][1])
self._assert(ret[2][2] == 4326, 11,
'wrong value for organization_coordsys_id for '
'srs_id = 4326: %s' % ret[2][2])
self._assert(ret[2][3] != 'undefined', 11,
'wrong value for definition for srs_id = 4326: %s' %
ret[2][3])
if has_definition_12_063:
self._assert(ret[2][4] != 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = 4326: %s' % ret[2][4])
if has_definition_12_063:
c.execute("SELECT srs_id FROM gpkg_spatial_ref_sys "
"WHERE srs_id NOT IN (0, -1) AND "
"definition = 'undefined' AND "
"definition_12_063 = 'undefined'")
rows = c.fetchall()
for (srs_id, ) in rows:
self._assert(False, 117,
'srs_id = %d has both definition and ' % srs_id +
'definition_12_063 undefined')
def _check_gpkg_contents(self, c):
self._log('Checking gpkg_contents')
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_contents'")
self._assert(c.fetchone() is not None, 13,
"gpkg_contents table missing")
c.execute("PRAGMA table_info(gpkg_contents)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'data_type', 'TEXT', 1, None, 0),
(2, 'identifier', 'TEXT', 0, None, 0),
(3, 'description', 'TEXT', 0, "''", 0),
(4, 'last_change', 'DATETIME', 1,
"strftime('%Y-%m-%dT%H:%M:%fZ','now')", 0),
(5, 'min_x', 'DOUBLE', 0, None, 0),
(6, 'min_y', 'DOUBLE', 0, None, 0),
(7, 'max_x', 'DOUBLE', 0, None, 0),
(8, 'max_y', 'DOUBLE', 0, None, 0),
(9, 'srs_id', 'INTEGER', 0, None, 0)
]
self._check_structure(columns, expected_columns, 13, 'gpkg_contents')
c.execute("SELECT 1 FROM gpkg_contents "
"WHERE data_type IN ('features', 'tiles')")
self._assert(c.fetchone() is not None, 17,
'gpkg_contents should at least have one table with '
'data_type = features and/or tiles')
c.execute("SELECT table_name, data_type FROM gpkg_contents "
"WHERE data_type NOT IN "
"('features', 'tiles', 'attributes', '2d-gridded-coverage')")
ret = c.fetchall()
self._assert(len(ret) == 0, 17,
'Unexpected data types in gpkg_contents: %s' % str(ret))
c.execute('SELECT table_name, last_change, srs_id FROM gpkg_contents')
rows = c.fetchall()
for (table_name, last_change, srs_id) in rows:
c.execute("SELECT 1 FROM sqlite_master WHERE "
"lower(name) = lower(?) AND type IN ('table', 'view')", (table_name,))
self._assert(c.fetchone() is not None, 14,
('table_name=%s in gpkg_contents is not a ' +
'table or view') % table_name)
try:
datetime.datetime.strptime(
last_change, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self._assert(False, 15,
('last_change = %s for table_name = %s ' +
'is invalid datetime') %
(last_change, table_name))
if srs_id is not None:
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys '
'WHERE srs_id = ?', (srs_id, ))
self._assert(c.fetchone() is not None, 14,
("table_name=%s has srs_id=%d in gpkg_contents " +
"which isn't found in gpkg_spatial_ref_sys") %
(table_name, srs_id))
def _check_vector_user_table(self, c, table_name):
self._log('Checking vector user table ' + table_name)
c.execute("SELECT column_name, z, m, geometry_type_name, srs_id "
"FROM gpkg_geometry_columns WHERE table_name = ?",
(table_name,))
rows_gpkg_geometry_columns = c.fetchall()
self._assert(len(rows_gpkg_geometry_columns) == 1, 22,
('table_name = %s is not registered in ' +
'gpkg_geometry_columns') % table_name)
geom_column_name = rows_gpkg_geometry_columns[0][0]
z = rows_gpkg_geometry_columns[0][1]
m = rows_gpkg_geometry_columns[0][2]
geometry_type_name = rows_gpkg_geometry_columns[0][3]
srs_id = rows_gpkg_geometry_columns[0][4]
c.execute('PRAGMA table_info(%s)' % _esc_id(table_name))
base_geom_types = ('GEOMETRY', 'POINT', 'LINESTRING', 'POLYGON',
'MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON',
'GEOMETRYCOLLECTION')
cols = c.fetchall()
found_geom = False
count_pkid = 0
for (_, name, type, notnull, default, pk) in cols:
if name.lower() == geom_column_name.lower():
found_geom = True
self._assert(
type in base_geom_types or
type in GPKGChecker.EXT_GEOM_TYPES,
25, ('invalid type (%s) for geometry ' +
'column of table %s') % (type, table_name))
self._assert(type == geometry_type_name, 31,
('table %s has geometry column of type %s in ' +
'SQL and %s in geometry_type_name of ' +
'gpkg_geometry_columns') %
(table_name, type, geometry_type_name))
elif pk == 1:
count_pkid += 1
self._assert(type == 'INTEGER', 29,
('table %s has a PRIMARY KEY of type %s ' +
'instead of INTEGER') % (table_name, type))
else:
self._assert(_is_valid_data_type(type), 5,
('table %s has column %s of unexpected type %s'
% (table_name, name, type)))
self._assert(found_geom, 24,
'table %s has no %s column' %
(table_name, geom_column_name))
self._assert(count_pkid == 1, 29,
'table %s has no INTEGER PRIMARY KEY' % table_name)
self._assert(z in (0, 1, 2), 27, ("z value of %s is %d. " +
"Expected 0, 1 or 2") % (table_name, z))
self._assert(m in (0, 1, 2), 27, ("m value of %s is %d. " +
"Expected 0, 1 or 2") % (table_name, m))
if geometry_type_name in GPKGChecker.EXT_GEOM_TYPES:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_geom_%s' AND "
"table_name = ? AND column_name = ? AND "
"scope = 'read-write'" % geometry_type_name,
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 68,
"gpkg_geom_%s extension should be declared for "
"table %s" % (geometry_type_name, table_name))
wkb_geometries = base_geom_types + GPKGChecker.EXT_GEOM_TYPES
c.execute("SELECT %s FROM %s " %
(_esc_id(geom_column_name), _esc_id(table_name)))
found_geom_types = set()
for (blob,) in c.fetchall():
if blob is None:
continue
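            # GeoPackageBinary header: magic 'G','P', a version byte, a flags
            # byte (bit 0: byte order, bits 1-3: envelope indicator, bit 5:
            # extended-binary flag), a 4-byte srs_id, an optional envelope,
            # then the ISO WKB geometry itself.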
self._assert(len(blob) >= 8, 19, 'Invalid geometry')
max_size_needed = min(len(blob), 8 + 4 * 2 * 8 + 5)
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
self._assert(blob_ar[0] == ord('G'), 19, 'Invalid geometry')
self._assert(blob_ar[1] == ord('P'), 19, 'Invalid geometry')
self._assert(blob_ar[2] == 0, 19, 'Invalid geometry')
flags = blob_ar[3]
big_endian = (flags & 1) == 0
env_ind = (flags >> 1) & 7
self._assert(((flags >> 5) & 1) == 0, 19,
'Invalid geometry: ExtendedGeoPackageBinary not '
'allowed')
self._assert(env_ind <= 4, 19,
'Invalid geometry: invalid envelope indicator code')
if big_endian:
geom_srs_id = struct.unpack('>I' * 1, blob[4:8])[0]
else:
geom_srs_id = struct.unpack('<I' * 1, blob[4:8])[0]
self._assert(srs_id == geom_srs_id, 33,
('table %s has geometries with SRID %d, ' +
'whereas only %d is expected') %
(table_name, geom_srs_id, srs_id))
if env_ind == 0:
coord_dim = 0
elif env_ind == 1:
coord_dim = 2
elif env_ind == 2 or env_ind == 3:
coord_dim = 3
else:
coord_dim = 4
# if env_ind == 2 or env_ind == 4:
# self._assert(z > 0, 19,
# 'z found in geometry, but not in gpkg_geometry_columns')
# if env_ind == 3 or env_ind == 4:
# self._assert(m > 0, 19,
# 'm found in geometry, but not in gpkg_geometry_columns')
header_len = 8 + coord_dim * 2 * 8
self._assert(len(blob) >= header_len, 19, 'Invalid geometry')
wkb_endianness = blob_ar[header_len]
wkb_big_endian = (wkb_endianness == 0)
if wkb_big_endian:
wkb_geom_type = struct.unpack(
'>I' * 1, blob[header_len + 1:header_len + 5])[0]
else:
wkb_geom_type = struct.unpack(
'<I' * 1, blob[header_len + 1:header_len + 5])[0]
self._assert(wkb_geom_type >= 0 and
(wkb_geom_type % 1000) < len(wkb_geometries),
19, 'Invalid WKB geometry type')
wkb_dim = int(wkb_geom_type / 1000)
if z == 1:
self._assert(wkb_dim == 1 or wkb_dim == 3, 19,
'geometry without Z found')
if m == 1:
self._assert(wkb_dim == 2 or wkb_dim == 3, 19,
'geometry without M found')
if wkb_dim == 1 or wkb_dim == 3: # Z or ZM
self._assert(z > 0, 19,
'z found in geometry, but not in '
'gpkg_geometry_columns')
if wkb_dim == 2 or wkb_dim == 3: # M or ZM
self._assert(m > 0, 19,
'm found in geometry, but not in '
'gpkg_geometry_columns')
found_geom_types.add(wkb_geometries[wkb_geom_type % 1000])
if geometry_type_name in ('POINT', 'LINESTRING', 'POLYGON',
'MULTIPOINT', 'MULTILINESTRING',
'MULTIPOLYGON'):
self._assert(not found_geom_types or
found_geom_types == set([geometry_type_name]), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'GEOMETRYCOLLECTION':
self._assert(not found_geom_types or
not found_geom_types.difference(
set(['GEOMETRYCOLLECTION', 'MULTIPOINT',
'MULTILINESTRING', 'MULTIPOLYGON',
'MULTICURVE', 'MULTISURFACE'])), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name in ('CURVEPOLYGON', 'SURFACE'):
self._assert(not found_geom_types or
not found_geom_types.difference(
set(['POLYGON', 'CURVEPOLYGON'])), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'MULTICURVE':
self._assert(not found_geom_types or
not found_geom_types.difference(
set(['MULTILINESTRING', 'MULTICURVE'])), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'MULTISURFACE':
self._assert(not found_geom_types or
not found_geom_types.difference(
set(['MULTIPOLYGON', 'MULTISURFACE'])), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'CURVE':
self._assert(not found_geom_types or
not found_geom_types.difference(
set(['LINESTRING', 'CIRCULARSTRING',
'COMPOUNDCURVE'])), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
for geom_type in found_geom_types:
if geom_type in GPKGChecker.EXT_GEOM_TYPES:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_geom_%s' AND "
"table_name = ? AND column_name = ? AND "
"scope = 'read-write'" % geom_type,
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 68,
"gpkg_geom_%s extension should be declared for "
"table %s" % (geom_type, table_name))
rtree_name = 'rtree_%s_%s' % (table_name, geom_column_name)
c.execute("SELECT 1 FROM sqlite_master WHERE name = ?", (rtree_name,))
has_rtree = c.fetchone() is not None
if has_rtree:
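            # A declared RTree must be registered in gpkg_extensions and come
            # with the insert/update1-4/delete triggers that keep it in sync
            # with the geometry column.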
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_rtree_index' AND "
"table_name=? AND column_name=? AND "
"scope='write-only'",
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 78,
("Table %s has a RTree, but not declared in " +
"gpkg_extensions") % table_name)
c.execute('PRAGMA table_info(%s)' % _esc_id(rtree_name))
columns = c.fetchall()
expected_columns = [
(0, 'id', '', 0, None, 0),
(1, 'minx', '', 0, None, 0),
(2, 'maxx', '', 0, None, 0),
(3, 'miny', '', 0, None, 0),
(4, 'maxy', '', 0, None, 0)
]
self._check_structure(columns, expected_columns, 77, rtree_name)
c.execute("SELECT 1 FROM sqlite_master WHERE type = 'trigger' " +
"AND name = '%s_insert'" % _esc_literal(rtree_name))
self._assert(c.fetchone() is not None, 75,
"%s_insert trigger missing" % rtree_name)
for i in range(4):
c.execute("SELECT 1 FROM sqlite_master WHERE " +
"type = 'trigger' " +
"AND name = '%s_update%d'" %
(_esc_literal(rtree_name), i + 1))
self._assert(c.fetchone() is not None, 75,
"%s_update%d trigger missing" % (rtree_name, i + 1))
c.execute("SELECT 1 FROM sqlite_master WHERE type = 'trigger' " +
"AND name = '%s_delete'" % _esc_literal(rtree_name))
self._assert(c.fetchone() is not None, 75,
"%s_delete trigger missing" % rtree_name)
def _check_features(self, c):
self._log('Checking features')
c.execute("SELECT 1 FROM gpkg_contents WHERE data_type = 'features'")
if c.fetchone() is None:
self._log('... No features table')
return
self._log('Checking gpkg_geometry_columns')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_geometry_columns'")
self._assert(c.fetchone() is not None, 21,
"gpkg_geometry_columns table missing")
c.execute("PRAGMA table_info(gpkg_geometry_columns)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'column_name', 'TEXT', 1, None, 2),
(2, 'geometry_type_name', 'TEXT', 1, None, 0),
(3, 'srs_id', 'INTEGER', 1, None, 0),
(4, 'z', 'TINYINT', 1, None, 0),
(5, 'm', 'TINYINT', 1, None, 0)
]
self._check_structure(columns, expected_columns, 21,
'gpkg_geometry_columns')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = 'features'")
rows = c.fetchall()
for (table_name,) in rows:
self._check_vector_user_table(c, table_name)
c.execute("SELECT table_name, srs_id FROM gpkg_geometry_columns")
rows = c.fetchall()
for (table_name, srs_id) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? " +
"AND data_type='features'", (table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 23,
('table_name = %s is registered in ' +
'gpkg_geometry_columns, but not in gpkg_contents') %
table_name)
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys WHERE ' +
'srs_id = ?', (srs_id, ))
self._assert(c.fetchone() is not None, 14,
("table_name=%s has srs_id=%d in " +
"gpkg_geometry_columns which isn't found in " +
"gpkg_spatial_ref_sys") % (table_name, srs_id))
def _check_attributes(self, c):
self._log('Checking attributes')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = 'attributes'")
rows = c.fetchall()
if not rows:
self._log('... No attributes table')
for (table_name,) in rows:
self._log('Checking attributes table ' + table_name)
c.execute('PRAGMA table_info(%s)' % _esc_id(table_name))
cols = c.fetchall()
count_pkid = 0
for (_, name, type, notnull, default, pk) in cols:
if pk == 1:
count_pkid += 1
self._assert(type == 'INTEGER', 119,
('table %s has a PRIMARY KEY of type %s ' +
'instead of INTEGER') % (table_name, type))
else:
self._assert(_is_valid_data_type(type), 5,
'table %s has column %s of unexpected type %s'
% (table_name, name, type))
self._assert(count_pkid == 1, 119,
'table %s has no INTEGER PRIMARY KEY' % table_name)
def _check_tile_user_table(self, c, table_name, data_type):
self._log('Checking tile pyramid user table ' + table_name)
c.execute("PRAGMA table_info(%s)" % _esc_id(table_name))
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 0, None, 1),
(1, 'zoom_level', 'INTEGER', 1, None, 0),
(2, 'tile_column', 'INTEGER', 1, None, 0),
(3, 'tile_row', 'INTEGER', 1, None, 0),
(4, 'tile_data', 'BLOB', 1, None, 0)
]
        self._check_structure(columns, expected_columns, 54,
                              table_name)
c.execute("SELECT DISTINCT zoom_level FROM %s" % _esc_id(table_name))
rows = c.fetchall()
for (zoom_level, ) in rows:
c.execute("SELECT 1 FROM gpkg_tile_matrix WHERE table_name = ? "
"AND zoom_level = ?", (table_name, zoom_level))
self._assert(c.fetchone() is not None, 44,
("Table %s has data for zoom_level = %d, but no " +
"corresponding row in gpkg_tile_matrix") %
(table_name, zoom_level))
zoom_other_levels = False
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is not None:
c.execute("SELECT column_name FROM gpkg_extensions WHERE "
"table_name = ? "
"AND extension_name = 'gpkg_zoom_other'", (table_name,))
row = c.fetchone()
if row is not None:
(column_name, ) = row
self._assert(column_name == 'tile_data', 88,
'Wrong column_name in gpkg_extensions for '
'gpkg_zoom_other')
zoom_other_levels = True
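        # Unless the gpkg_zoom_other extension is declared, each successive
        # zoom level must halve pixel_x_size and pixel_y_size.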
c.execute("SELECT zoom_level, pixel_x_size, pixel_y_size "
"FROM gpkg_tile_matrix "
"WHERE table_name = ? ORDER BY zoom_level", (table_name,))
rows = c.fetchall()
prev_zoom_level = None
prev_pixel_x_size = None
prev_pixel_y_size = None
for (zoom_level, pixel_x_size, pixel_y_size) in rows:
if prev_pixel_x_size is not None:
self._assert(
pixel_x_size < prev_pixel_x_size and
pixel_y_size < prev_pixel_y_size,
53,
('For table %s, pixel size are not consistent ' +
'with zoom_level') % table_name)
if prev_zoom_level is not None and \
zoom_level == prev_zoom_level + 1 and not zoom_other_levels:
self._assert(
abs((pixel_x_size - prev_pixel_x_size / 2) /
prev_pixel_x_size) < 1e-5, 35,
"Expected pixel_x_size=%f for zoom_level=%d. Got %f" %
(prev_pixel_x_size / 2, zoom_level, pixel_x_size))
self._assert(
abs((pixel_y_size - prev_pixel_y_size / 2) /
prev_pixel_y_size) < 1e-5, 35,
"Expected pixel_y_size=%f for zoom_level=%d. Got %f" %
(prev_pixel_y_size / 2, zoom_level, pixel_y_size))
prev_pixel_x_size = pixel_x_size
prev_pixel_y_size = pixel_y_size
prev_zoom_level = zoom_level
c.execute("SELECT max_x - min_x, "
" MIN(matrix_width * tile_width * pixel_x_size), "
" MAX(matrix_width * tile_width * pixel_x_size), "
" max_y - min_y, "
" MIN(matrix_height * tile_height * pixel_y_size), "
" MAX(matrix_height * tile_height * pixel_y_size) "
"FROM gpkg_tile_matrix tm JOIN gpkg_tile_matrix_set tms "
"ON tm.table_name = tms.table_name WHERE tm.table_name = ?",
(table_name,))
rows = c.fetchall()
if rows:
(dx, min_dx, max_dx, dy, min_dy, max_dy) = rows[0]
self._assert(abs((min_dx - dx) / dx) < 1e-3 and
abs((max_dx - dx) / dx) < 1e-3 and
abs((min_dy - dy) / dy) < 1e-3 and
abs((max_dy - dy) / dy) < 1e-3, 45,
("Inconsistent values in gpkg_tile_matrix and " +
"gpkg_tile_matrix_set for table %s") % table_name)
c.execute("SELECT DISTINCT zoom_level FROM %s" % _esc_id(table_name))
rows = c.fetchall()
for (zoom_level,) in rows:
c.execute(("SELECT MIN(tile_column), MAX(tile_column), " +
"MIN(tile_row), MAX(tile_row) FROM %s " +
"WHERE zoom_level = %d") %
(_esc_id(table_name), zoom_level))
min_col, max_col, min_row, max_row = c.fetchone()
c.execute("SELECT matrix_width, matrix_height FROM "
"gpkg_tile_matrix "
"WHERE table_name = ? AND zoom_level = ?",
(table_name, zoom_level))
rows2 = c.fetchall()
if not rows2:
self._assert(False, 55,
"Invalid zoom_level in %s" % table_name)
else:
matrix_width, matrix_height = rows2[0]
self._assert(min_col >= 0 and min_col < matrix_width, 56,
"Invalid tile_col in %s" % table_name)
self._assert(min_row >= 0 and min_row < matrix_height, 57,
"Invalid tile_row in %s" % table_name)
c.execute("SELECT tile_data FROM %s" % _esc_id(table_name))
found_webp = False
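        # Sniff the tile format from its magic bytes: JPEG (FF D8 FF),
        # PNG (89 'PNG'), WebP ('RIFF'...'WEBP') or TIFF ('II*\0' / 'MM\0*').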
for (blob,) in c.fetchall():
self._assert(blob is not None and len(blob) >= 12, 19,
'Invalid blob')
max_size_needed = 12
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
is_jpeg = blob_ar[0:3] == (0xff, 0xd8, 0xff)
is_png = blob_ar[0:4] == (0x89, 0x50, 0x4E, 0x47)
is_webp = blob_ar[0:4] == (ord('R'), ord('I'),
ord('F'), ord('F')) and \
blob_ar[8:12] == (ord('W'), ord('E'), ord('B'), ord('P'))
is_tiff = blob_ar[0:4] == (0x49, 0x49, 0x2A, 0x00) or \
blob_ar[0:4] == (0x4D, 0x4D, 0x00, 0x2A)
self._assert(is_jpeg or is_png or is_webp or is_tiff, 36,
'Unrecognized image mime type')
if data_type == 'tiles':
self._assert(is_jpeg or is_png or is_webp, 36,
'Unrecognized image mime type')
elif data_type == '2d-gridded-coverage':
self._assert(is_png or is_tiff, 36,
'Unrecognized image mime type')
if is_webp:
found_webp = True
if found_webp:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"table_name = ? AND column_name = 'tile_data' AND "
"extension_name = 'gpkg_webp' AND "
"scope = 'read-write'", (table_name, ))
self._assert(c.fetchone() is not None, 91,
("Table %s has webp content, but not registered "
"in gpkg_extensions" % table_name))
def _check_tiles(self, c):
self._log('Checking tiles')
c.execute("SELECT 1 FROM gpkg_contents WHERE data_type IN "
"('tiles', '2d-gridded-coverage')")
if c.fetchone() is None:
self._log('... No tiles table')
return
self._log('Checking gpkg_tile_matrix_set ')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_tile_matrix_set'")
self._assert(c.fetchone() is not None, 38,
"gpkg_tile_matrix_set table missing")
c.execute("PRAGMA table_info(gpkg_tile_matrix_set)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'srs_id', 'INTEGER', 1, None, 0),
(2, 'min_x', 'DOUBLE', 1, None, 0),
(3, 'min_y', 'DOUBLE', 1, None, 0),
(4, 'max_x', 'DOUBLE', 1, None, 0),
(5, 'max_y', 'DOUBLE', 1, None, 0)]
self._check_structure(columns, expected_columns, 38,
'gpkg_tile_matrix_set')
c.execute("SELECT table_name, srs_id FROM gpkg_tile_matrix_set")
rows = c.fetchall()
for (table_name, srs_id) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? " +
"AND data_type IN ('tiles', '2d-gridded-coverage')",
(table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 39,
('table_name = %s is registered in ' +
'gpkg_tile_matrix_set, but not in gpkg_contents') %
table_name)
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys WHERE srs_id = ?',
(srs_id, ))
self._assert(c.fetchone() is not None, 41,
("table_name=%s has srs_id=%d in " +
"gpkg_tile_matrix_set which isn't found in " +
"gpkg_spatial_ref_sys") % (table_name, srs_id))
self._log('Checking gpkg_tile_matrix')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_tile_matrix'")
self._assert(c.fetchone() is not None, 42,
"gpkg_tile_matrix table missing")
c.execute("PRAGMA table_info(gpkg_tile_matrix)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'zoom_level', 'INTEGER', 1, None, 2),
(2, 'matrix_width', 'INTEGER', 1, None, 0),
(3, 'matrix_height', 'INTEGER', 1, None, 0),
(4, 'tile_width', 'INTEGER', 1, None, 0),
(5, 'tile_height', 'INTEGER', 1, None, 0),
(6, 'pixel_x_size', 'DOUBLE', 1, None, 0),
(7, 'pixel_y_size', 'DOUBLE', 1, None, 0)
]
self._check_structure(columns, expected_columns, 42,
'gpkg_tile_matrix')
c.execute("SELECT table_name, zoom_level, matrix_width, "
"matrix_height, tile_width, tile_height, pixel_x_size, "
"pixel_y_size FROM gpkg_tile_matrix")
rows = c.fetchall()
for (table_name, zoom_level, matrix_width, matrix_height, tile_width,
tile_height, pixel_x_size, pixel_y_size) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? "
"AND data_type IN ('tiles', '2d-gridded-coverage')",
(table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 43,
('table_name = %s is registered in ' +
'gpkg_tile_matrix, but not in gpkg_contents') %
table_name)
self._assert(zoom_level >= 0, 46,
"Invalid zoom_level = %d for table %s" %
(zoom_level, table_name))
self._assert(matrix_width > 0, 47,
"Invalid matrix_width = %d for table %s" %
(matrix_width, table_name))
self._assert(matrix_height > 0, 48,
"Invalid matrix_height = %d for table %s" %
(matrix_height, table_name))
self._assert(tile_width > 0, 49,
"Invalid tile_width = %d for table %s" %
(tile_width, table_name))
self._assert(tile_height > 0, 50,
"Invalid tile_height = %d for table %s" %
(tile_height, table_name))
self._assert(pixel_x_size > 0, 51,
"Invalid pixel_x_size = %f for table %s" %
(pixel_x_size, table_name))
self._assert(pixel_y_size > 0, 52,
"Invalid pixel_y_size = %f for table %s" %
(pixel_y_size, table_name))
c.execute("SELECT table_name, data_type FROM gpkg_contents WHERE "
"data_type IN ('tiles', '2d-gridded-coverage')")
rows = c.fetchall()
for (table_name, data_type) in rows:
self._check_tile_user_table(c, table_name, data_type)
def _check_tiled_gridded_coverage_data(self, c):
self._log('Checking tiled gridded elevation data')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = '2d-gridded-coverage'")
tables = c.fetchall()
if not tables:
self._log('... No tiled gridded coverage table')
return
        tables = [t[0] for t in tables]
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_2d_gridded_coverage_ancillary'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#1',
'gpkg_2d_gridded_coverage_ancillary table is missing')
c.execute("PRAGMA table_info(gpkg_2d_gridded_coverage_ancillary)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 1, None, 1),
(1, 'tile_matrix_set_name', 'TEXT', 1, None, 0),
(2, 'datatype', 'TEXT', 1, "'integer'", 0),
(3, 'scale', 'REAL', 1, '1.0', 0),
(4, 'offset', 'REAL', 1, '0.0', 0),
(5, 'precision', 'REAL', 0, '1.0', 0),
(6, 'data_null', 'REAL', 0, None, 0),
(7, 'grid_cell_encoding', 'TEXT', 0, "'grid-value-is-center'", 0),
(8, 'uom', 'TEXT', 0, None, 0),
(9, 'field_name', 'TEXT', 0, "'Height'", 0),
(10, 'quantity_definition', 'TEXT', 0, "'Height'", 0)
]
self._check_structure(columns, expected_columns, 'gpkg_2d_gridded_coverage#1',
'gpkg_2d_gridded_coverage_ancillary')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_2d_gridded_tile_ancillary'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#2',
'gpkg_2d_gridded_tile_ancillary table is missing')
c.execute("PRAGMA table_info(gpkg_2d_gridded_tile_ancillary)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 0, None, 1),
(1, 'tpudt_name', 'TEXT', 1, None, 0),
(2, 'tpudt_id', 'INTEGER', 1, None, 0),
(3, 'scale', 'REAL', 1, '1.0', 0),
(4, 'offset', 'REAL', 1, '0.0', 0),
(5, 'min', 'REAL', 0, 'NULL', 0),
(6, 'max', 'REAL', 0, 'NULL', 0),
(7, 'mean', 'REAL', 0, 'NULL', 0),
(8, 'std_dev', 'REAL', 0, 'NULL', 0)
]
self._check_structure(columns, expected_columns, 'gpkg_2d_gridded_coverage#2',
'gpkg_2d_gridded_tile_ancillary')
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition FROM gpkg_spatial_ref_sys "
"WHERE srs_id = 4979")
ret = c.fetchall()
self._assert(len(ret) == 1, 'gpkg_2d_gridded_coverage#3',
"gpkg_spatial_ref_sys shall have a row for srs_id=4979")
self._assert(ret[0][1].lower() == 'epsg', 'gpkg_2d_gridded_coverage#3',
'wrong value for organization for srs_id = 4979: %s' %
ret[0][1])
self._assert(ret[0][2] == 4979, 'gpkg_2d_gridded_coverage#3',
('wrong value for organization_coordsys_id for ' +
'srs_id = 4979: %s') % ret[0][2])
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#6',
'gpkg_extensions does not exist')
c.execute("SELECT table_name, column_name, definition, scope FROM "
"gpkg_extensions WHERE "
"extension_name = 'gpkg_2d_gridded_coverage'")
rows = c.fetchall()
self._assert(len(rows) == 2 + len(tables), 'gpkg_2d_gridded_coverage#6',
"Wrong number of entries in gpkg_extensions with "
"2d_gridded_coverage extension name")
found_gpkg_2d_gridded_coverage_ancillary = False
found_gpkg_2d_gridded_tile_ancillary = False
expected_def = \
'http://docs.opengeospatial.org/is/17-066r1/17-066r1.html'
for (table_name, column_name, definition, scope) in rows:
if table_name == 'gpkg_2d_gridded_coverage_ancillary':
found_gpkg_2d_gridded_coverage_ancillary = True
self._assert(column_name is None, 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
elif table_name == 'gpkg_2d_gridded_tile_ancillary':
found_gpkg_2d_gridded_tile_ancillary = True
self._assert(column_name is None, 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
else:
self._assert(table_name in tables, 'gpkg_2d_gridded_coverage#6',
"Unexpected table_name registered for " +
"2d_gridded_coverage: %s" % table_name)
self._assert(column_name == 'tile_data', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for %s " % table_name +
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for %s " % table_name +
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for %s " % table_name +
"in gpkg_extensions")
self._assert(found_gpkg_2d_gridded_coverage_ancillary, 'gpkg_2d_gridded_coverage#6',
"gpkg_2d_gridded_coverage_ancillary not registered "
"for 2d_gridded_coverage")
self._assert(found_gpkg_2d_gridded_tile_ancillary, 'gpkg_2d_gridded_coverage#6',
"gpkg_2d_gridded_tile_ancillary not registered "
"for 2d_gridded_coverage")
c.execute("SELECT tile_matrix_set_name, datatype FROM "
"gpkg_2d_gridded_coverage_ancillary")
rows = c.fetchall()
self._assert(len(rows) == len(tables), 'gpkg_2d_gridded_coverage#7',
"Wrong number of entries in "
"gpkg_2d_gridded_coverage_ancillary")
for (tile_matrix_set_name, datatype) in rows:
self._assert(tile_matrix_set_name in tables, 'gpkg_2d_gridded_coverage#7',
"Table %s has a row in " % tile_matrix_set_name +
"gpkg_2d_gridded_coverage_ancillary, but not in "
"gpkg_contents")
c.execute('SELECT 1 FROM gpkg_tile_matrix_set WHERE '
'table_name = ?', (tile_matrix_set_name,))
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#8',
'missing entry in gpkg_tile_matrix_set ' +
'for %s' % tile_matrix_set_name)
self._assert(datatype in ('integer', 'float'), 'gpkg_2d_gridded_coverage#9',
'Unexpected datatype = %s' % datatype)
for table in tables:
c.execute("SELECT COUNT(*) FROM %s" % _esc_id(table))
count_tpudt = c.fetchone()
c.execute("SELECT COUNT(*) FROM gpkg_2d_gridded_tile_ancillary "
"WHERE tpudt_name = ?", (table, ))
count_tile_ancillary = c.fetchone()
self._assert(count_tpudt == count_tile_ancillary, 'gpkg_2d_gridded_coverage#10',
("Inconsistent number of rows in " +
"gpkg_2d_gridded_tile_ancillary for %s") % table)
c.execute("SELECT DISTINCT tpudt_name FROM "
"gpkg_2d_gridded_tile_ancillary")
rows = c.fetchall()
for (tpudt_name, ) in rows:
self._assert(tpudt_name in tables, 'gpkg_2d_gridded_coverage#11',
"tpudt_name = %s is invalid" % tpudt_name)
c.execute("SELECT tile_matrix_set_name FROM "
"gpkg_2d_gridded_coverage_ancillary WHERE "
"datatype = 'float'")
rows = c.fetchall()
for (tile_matrix_set_name, ) in rows:
c.execute("SELECT 1 FROM gpkg_2d_gridded_tile_ancillary WHERE "
"tpudt_name = ? AND "
"NOT (offset == 0.0 AND scale == 1.0)",
(tile_matrix_set_name,))
self._assert(len(c.fetchall()) == 0, 'gpkg_2d_gridded_coverage#9',
"Wrong scale and offset values " +
"for %s " % tile_matrix_set_name +
"in gpkg_2d_gridded_coverage_ancillary")
for table in tables:
c.execute("SELECT 1 FROM gpkg_2d_gridded_tile_ancillary WHERE " +
"tpudt_name = ? AND tpudt_id NOT IN (SELECT id FROM " +
"%s)" % table, (table,))
self._assert(len(c.fetchall()) == 0, 'gpkg_2d_gridded_coverage#12',
"tpudt_id in gpkg_2d_gridded_coverage_ancillary " +
"not referencing an id from %s" % table)
c.execute("SELECT tile_matrix_set_name, datatype FROM "
"gpkg_2d_gridded_coverage_ancillary")
rows = c.fetchall()
warn_gdal_not_available = False
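        # When GDAL is available, each tile blob is opened through a /vsimem
        # file so that band count, data type, compression and block layout
        # can be checked against the requirements.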
for (table_name, datatype) in rows:
c.execute("SELECT id, tile_data FROM %s" % _esc_id(table_name))
for (id, blob) in c.fetchall():
self._assert(blob is not None and len(blob) >= 12, 19,
'Invalid blob')
max_size_needed = 12
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
is_png = blob_ar[0:4] == (0x89, 0x50, 0x4E, 0x47)
is_tiff = blob_ar[0:4] == (0x49, 0x49, 0x2A, 0x00) or \
blob_ar[0:4] == (0x4D, 0x4D, 0x00, 0x2A)
if datatype == 'integer':
self._assert(is_png, 'gpkg_2d_gridded_coverage#13',
'Tile for %s should be PNG' % table_name)
if has_gdal:
tmp_file = '/vsimem/temp_validate_gpkg.tif'
try:
blob = bytes(blob)
                        except Exception:
blob = str(blob)
gdal.FileFromMemBuffer(tmp_file, blob)
ds = gdal.Open(tmp_file)
try:
self._assert(ds is not None, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.RasterCount == 1, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.GetRasterBand(1).DataType ==
gdal.GDT_UInt16, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
finally:
gdal.Unlink(tmp_file)
else:
if not warn_gdal_not_available:
warn_gdal_not_available = True
self._log('GDAL not available. Req gpkg_2d_gridded_coverage#13 not tested')
elif datatype == 'float':
self._assert(is_tiff, 'gpkg_2d_gridded_coverage#14',
'Tile for %s should be TIFF' % table_name)
if has_gdal:
tmp_file = '/vsimem/temp_validate_gpkg.tif'
try:
blob = bytes(blob)
                        except Exception:
blob = str(blob)
gdal.FileFromMemBuffer(tmp_file, blob)
ds = gdal.Open(tmp_file)
try:
self._assert(ds is not None, 'gpkg_2d_gridded_coverage#15',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.RasterCount == 1, 'gpkg_2d_gridded_coverage#16',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.GetRasterBand(1).DataType ==
gdal.GDT_Float32, 'gpkg_2d_gridded_coverage#17',
'Invalid tile %d in %s' %
(id, table_name))
compression = ds.GetMetadataItem('COMPRESSION',
'IMAGE_STRUCTURE')
self._assert(compression is None or
compression == 'LZW', 'gpkg_2d_gridded_coverage#18',
'Invalid tile %d in %s' %
(id, table_name))
ovr_count = ds.GetRasterBand(1).GetOverviewCount()
self._assert(not ds.GetSubDatasets() and
ovr_count == 0, 'gpkg_2d_gridded_coverage#19',
'Invalid tile %d in %s' %
(id, table_name))
(blockxsize, _) = \
ds.GetRasterBand(1).GetBlockSize()
self._assert(blockxsize == ds.RasterXSize, 'gpkg_2d_gridded_coverage#20',
'Invalid tile %d in %s' %
(id, table_name))
finally:
gdal.Unlink(tmp_file)
else:
if not warn_gdal_not_available:
warn_gdal_not_available = True
self._log('GDAL not available. '
'Req gpkg_2d_gridded_coverage#15 to gpkg_2d_gridded_coverage#19 not tested')
def _check_gpkg_extensions(self, c):
self._log('Checking gpkg_extensions')
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is None:
self._log('... No extensions')
return
c.execute("PRAGMA table_info(gpkg_extensions)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 0, None, 0),
(1, 'column_name', 'TEXT', 0, None, 0),
(2, 'extension_name', 'TEXT', 1, None, 0),
(3, 'definition', 'TEXT', 1, None, 0),
(4, 'scope', 'TEXT', 1, None, 0)]
self._check_structure(columns, expected_columns, 58,
'gpkg_extensions')
c.execute("SELECT table_name, column_name FROM gpkg_extensions WHERE "
"table_name IS NOT NULL")
rows = c.fetchall()
for (table_name, column_name) in rows:
# Doesn't work for gpkg_2d_gridded_coverage_ancillary
# c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ?", \
# (table_name,) )
# ret = c.fetchall()
# self._assert(len(ret) == 1, \
# 60, ('table_name = %s is registered in ' +\
# 'gpkg_extensions, but not in gpkg_contents') % table_name)
if column_name is not None:
try:
c.execute('SELECT %s FROM %s' %
(_esc_id(column_name), _esc_id(table_name)))
c.fetchone()
                except Exception:
self._assert(False, 61,
("Column %s of table %s mentioned in " +
"gpkg_extensions doesn't exist") %
(column_name, table_name))
c.execute("SELECT extension_name FROM gpkg_extensions")
rows = c.fetchall()
KNOWN_EXTENSIONS = ['gpkg_rtree_index',
'gpkg_zoom_other',
'gpkg_webp',
'gpkg_metadata',
'gpkg_schema',
'gpkg_crs_wkt',
'gpkg_elevation_tiles', # deprecated one
'gpkg_2d_gridded_coverage'
]
for geom_name in GPKGChecker.EXT_GEOM_TYPES:
KNOWN_EXTENSIONS += ['gpkg_geom_' + geom_name]
for (extension_name,) in rows:
if extension_name.startswith('gpkg_'):
self._assert(extension_name in KNOWN_EXTENSIONS,
62,
"extension_name %s not valid" % extension_name)
else:
self._assert('_' in extension_name,
62,
"extension_name %s not valid" % extension_name)
author = extension_name[0:extension_name.find('_')]
ext_name = extension_name[extension_name.find('_') + 1:]
for x in author:
self._assert((x >= 'a' and x <= 'z') or
(x >= 'A' and x <= 'Z') or
(x >= '0' and x <= '9'),
62,
"extension_name %s not valid" %
extension_name)
for x in ext_name:
self._assert((x >= 'a' and x <= 'z') or
(x >= 'A' and x <= 'Z') or
(x >= '0' and x <= '9') or x == '_',
62,
"extension_name %s not valid" %
extension_name)
# c.execute("SELECT extension_name, definition FROM gpkg_extensions "
# "WHERE definition NOT LIKE 'Annex %' AND "
# "definition NOT LIKE 'http%' AND "
# "definition NOT LIKE 'mailto:%' AND "
# "definition NOT LIKE 'Extension Title%' ")
# rows = c.fetchall()
# for (extension_name, definition) in rows:
# self._assert(False, 63,
# "extension_name %s has invalid definition %s" %
# (extension_name, definition))
c.execute("SELECT extension_name, scope FROM gpkg_extensions "
"WHERE scope NOT IN ('read-write', 'write-only')")
rows = c.fetchall()
for (extension_name, scope) in rows:
self._assert(False, 64,
"extension_name %s has invalid scope %s" %
(extension_name, scope))
c.execute("SELECT table_name, scope FROM gpkg_extensions "
"WHERE extension_name = 'gpkg_rtree_index' ")
rows = c.fetchall()
for (table_name, scope) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE lower(table_name) = lower(?) "
"AND data_type = 'features'", (table_name,))
self._assert(c.fetchone() is not None, 75,
('gpkg_extensions declares gpkg_rtree_index for %s,' +
' but this is not a features table') % table_name)
self._assert(scope == 'write-only', 75,
'Invalid scope %s for gpkg_rtree_index' % scope)
def _check_metadata(self, c):
self._log('Checking gpkg_metadata')
must_have_gpkg_metadata = False
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is not None:
c.execute("SELECT scope FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_metadata'")
row = c.fetchone()
if row is not None:
must_have_gpkg_metadata = True
(scope, ) = row
self._assert(scope == 'read-write', 140,
"Wrong scope for gpkg_metadata in "
"gpkg_extensions")
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_metadata'")
if c.fetchone() is None:
if must_have_gpkg_metadata:
self._assert(False, 140, "gpkg_metadata table missing")
else:
self._log('... No metadata')
return
c.execute("PRAGMA table_info(gpkg_metadata)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 1, None, 1),
(1, 'md_scope', 'TEXT', 1, "'dataset'", 0),
(2, 'md_standard_uri', 'TEXT', 1, None, 0),
(3, 'mime_type', 'TEXT', 1, "'text/xml'", 0),
(4, 'metadata', 'TEXT', 1, "''", 0)
]
self._check_structure(columns, expected_columns, 93,
'gpkg_metadata')
c.execute("SELECT 1 FROM sqlite_master "
"WHERE name = 'gpkg_metadata_reference'")
self._assert(c.fetchone() is not None, 95,
"gpkg_metadata_reference is missing")
c.execute("PRAGMA table_info(gpkg_metadata_reference)")
columns = c.fetchall()
expected_columns = [
(0, 'reference_scope', 'TEXT', 1, None, 0),
(1, 'table_name', 'TEXT', 0, None, 0),
(2, 'column_name', 'TEXT', 0, None, 0),
(3, 'row_id_value', 'INTEGER', 0, None, 0),
(4, 'timestamp', 'DATETIME', 1,
"strftime('%Y-%m-%dT%H:%M:%fZ','now')", 0),
(5, 'md_file_id', 'INTEGER', 1, None, 0),
(6, 'md_parent_id', 'INTEGER', 0, None, 0)
]
self._check_structure(columns, expected_columns, 95,
'gpkg_metadata_reference')
c.execute("SELECT DISTINCT md_scope FROM gpkg_metadata WHERE "
"md_scope NOT IN ('undefined', 'fieldSession', "
"'collectionSession', 'series', 'dataset', 'featureType', "
"'feature', 'attributeType', 'attribute', 'tile', "
"'model', 'catalog', 'schema', 'taxonomy', 'software', "
"'service', 'collectionHardware', 'nonGeographicDataset', "
"'dimensionGroup')")
rows = c.fetchall()
for (md_scope, ) in rows:
self._assert(False, 94, 'Invalid md_scope %s found' % md_scope)
c.execute("SELECT DISTINCT reference_scope FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', "
"'column', 'row', 'row/col')")
rows = c.fetchall()
for (md_scope, ) in rows:
self._assert(False, 96,
'Invalid reference_scope %s found' % md_scope)
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope = 'geopackage' AND table_name is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 97,
"row in gpkg_metadata_reference with table_name " +
"not null (%s)" % table_name +
"but reference_scope = geopackage")
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope != 'geopackage'")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(table_name is not None, 97,
"row in gpkg_metadata_reference with null table_name")
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ?",
(table_name,))
self._assert(c.fetchone() is not None, 97,
"row in gpkg_metadata_reference with table_name " +
"not null (%s) with no reference in " % table_name +
"gpkg_contents but reference_scope != geopackage")
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope IN ('geopackage', 'table', 'row') "
"AND column_name is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 98,
"row in gpkg_metadata_reference with column_name " +
"not null (table=%s)" % table_name +
"but reference_scope = geopackage, table or row")
c.execute("SELECT table_name, column_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', 'row')")
rows = c.fetchall()
for (table_name, column_name) in rows:
self._assert(column_name is not None, 98,
"row in gpkg_metadata_reference with null "
"column_name")
try:
c.execute("SELECT %s FROM %s" %
(_esc_id(column_name), _esc_id(table_name)))
            except Exception:
self._assert(False, 98,
"column %s of %s does not exist" %
(column_name, table_name))
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope IN ('geopackage', 'table', 'column') "
"AND row_id_value is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 99,
"row in gpkg_metadata_reference with row_id_value " +
"not null (table=%s)" % table_name +
"but reference_scope = geopackage, table or column")
c.execute("SELECT table_name, row_id_value FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', 'column')")
rows = c.fetchall()
for (table_name, row_id_value) in rows:
self._assert(row_id_value is not None, 99,
"row in gpkg_metadata_reference with null "
"row_id_value")
c.execute("SELECT 1 FROM %s WHERE ROWID = ?" %
_esc_id(column_name), (row_id_value, ))
self._assert(c.fetchone() is not None, 99,
"row %s of %s does not exist" %
(str(row_id_value), table_name))
c.execute("SELECT timestamp FROM gpkg_metadata_reference")
rows = c.fetchall()
for (timestamp, ) in rows:
try:
datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
                self._assert(False, 100,
                             ('timestamp = %s in gpkg_metadata_reference ' +
                              'is invalid datetime') % (timestamp))
c.execute("SELECT md_file_id FROM gpkg_metadata_reference")
rows = c.fetchall()
for (md_file_id, ) in rows:
c.execute("SELECT 1 FROM gpkg_metadata WHERE id = ?",
(md_file_id,))
self._assert(c.fetchone() is not None, 101,
"md_file_id = %s " % str(md_file_id) +
"does not have a row in gpkg_metadata")
c.execute("SELECT md_parent_id FROM gpkg_metadata_reference "
"WHERE md_parent_id IS NOT NULL")
rows = c.fetchall()
for (md_parent_id, ) in rows:
c.execute("SELECT 1 FROM gpkg_metadata WHERE id = ?",
(md_parent_id,))
self._assert(c.fetchone() is not None, 102,
"md_parent_id = %s " % str(md_parent_id) +
"does not have a row in gpkg_metadata")
c.execute("SELECT md_file_id FROM "
"gpkg_metadata_reference WHERE md_parent_id IS NOT NULL "
"AND md_file_id = md_parent_id")
rows = c.fetchall()
for (md_file_id, ) in rows:
self._assert(False, 102,
"Row with md_file_id = md_parent_id = %s " %
str(md_file_id))
def check(self):
self._assert(os.path.exists(self.filename), None,
"%s does not exist" % self.filename)
        self._assert(self.filename.lower().endswith('.gpkg'), 3,
                     "filename extension isn't .gpkg")
with open(self.filename, 'rb') as f:
f.seek(68, 0)
application_id = struct.unpack('B' * 4, f.read(4))
gp10 = struct.unpack('B' * 4, 'GP10'.encode('ASCII'))
gp11 = struct.unpack('B' * 4, 'GP11'.encode('ASCII'))
gpkg = struct.unpack('B' * 4, 'GPKG'.encode('ASCII'))
self._assert(application_id in (gp10, gp11, gpkg), 2,
("Wrong application_id: %s. " +
"Expected one of GP10, GP11, GPKG") %
str(application_id))
if application_id == gpkg:
f.seek(60, 0)
user_version = f.read(4)
expected_version = 10200
user_version = struct.unpack('>I', user_version)[0]
self._assert(user_version >= expected_version, 2,
'Wrong user_version: %d. Expected >= %d' %
(user_version, expected_version))
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE foo(one TEXT, two TEXT, '
'CONSTRAINT pk PRIMARY KEY (one, two))')
c.execute('PRAGMA table_info(foo)')
rows = c.fetchall()
if rows[1][5] == 2:
self.extended_pragma_info = True
c.close()
conn.close()
conn = sqlite3.connect(self.filename)
c = conn.cursor()
try:
try:
c.execute('SELECT 1 FROM sqlite_master')
c.fetchone()
except:
self._assert(False, 1, 'not a sqlite3 database')
c.execute('PRAGMA foreign_key_check')
ret = c.fetchall()
self._assert(len(ret) == 0, 7,
'foreign_key_check failed: %s' % str(ret))
c.execute('PRAGMA integrity_check')
self._assert(c.fetchone()[0] == 'ok', 6, 'integrity_check failed')
self._check_gpkg_spatial_ref_sys(c)
self._check_gpkg_contents(c)
self._check_features(c)
self._check_tiles(c)
self._check_attributes(c)
self._check_tiled_gridded_coverage_data(c)
self._check_gpkg_extensions(c)
self._check_metadata(c)
# TODO: check gpkg_schema
finally:
c.close()
conn.close()
def check(filename, abort_at_first_error=True, verbose=False):
checker = GPKGChecker(filename,
abort_at_first_error=abort_at_first_error,
verbose=verbose)
checker.check()
return checker.errors
def Usage():
print('validate_gpkg.py [[-v]|[-q]] [-k] my.gpkg')
print('')
print('-q: quiet mode')
print('-k: (try to) keep going when error is encountered')
sys.exit(1)
if __name__ == '__main__':
filename = None
verbose = False
abort_at_first_error = True
if len(sys.argv) == 1:
Usage()
for arg in sys.argv[1:]:
if arg == '-k':
abort_at_first_error = False
elif arg == '-q':
verbose = False
elif arg == '-v':
verbose = True
elif arg[0] == '-':
Usage()
else:
filename = arg
if filename is None:
Usage()
ret = check(filename, abort_at_first_error=abort_at_first_error,
verbose=verbose)
if not abort_at_first_error:
if not ret:
sys.exit(0)
else:
for (req, msg) in ret:
if req:
print('Req %d: %s' % (req, msg))
else:
print(msg)
sys.exit(1)
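# Example invocation (the filename is an illustrative assumption):
#   python validate_gpkg.py -k my.gpkg
# With -k the script keeps going, prints one "Req N: ..." line per violated
# requirement, and exits with status 1 if anything failed.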
| 47.190506 | 114 | 0.500385 |
efd3bd307152c17984e010ae708c8f80290a7624 | 627 | py | Python | src/tareas/tarea6.py | HectorMtz22/lang-pro-pia | 0fb9cc6190f23f5583076709938ff9c74175d78d | [
"WTFPL"
] | null | null | null | src/tareas/tarea6.py | HectorMtz22/lang-pro-pia | 0fb9cc6190f23f5583076709938ff9c74175d78d | [
"WTFPL"
] | null | null | null | src/tareas/tarea6.py | HectorMtz22/lang-pro-pia | 0fb9cc6190f23f5583076709938ff9c74175d78d | [
"WTFPL"
] | null | null | null | print("DECIMAL TO ANOTHER BASE CONVERTER")
number = input("Enter a decimal (base-10) number: ")
base = input("Enter the target base: ")
numbers = []
def residuo(number, base, iteration = 0):
cociente = number // base
res = number % base
print(" " * iteration, res, cociente)
numbers.append(res)
    if (cociente < base):
        if cociente > 0:  # avoid a leading zero when the final quotient is 0
            numbers.append(cociente)
        return
else:
return residuo(cociente, base, iteration + 1)
base = int(base)
residuo(int(number), base)
print("The result in base", base, "is: ", end='')
for i in range(len(numbers) - 1, -1, -1):
print(numbers[i], end='')
print()
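# Worked example (a sketch of one run, not executed): number = 10, base = 2
#   residuo(10, 2): 10 // 2 = 5 r 0 -> append 0
#   residuo(5, 2):   5 // 2 = 2 r 1 -> append 1
#   residuo(2, 2):   2 // 2 = 1 r 0 -> append 0, then append quotient 1
# numbers == [0, 1, 0, 1]; printed in reverse this yields 1010.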
| 24.115385 | 54 | 0.62201 |
7237faca204a0a651412e7ca60de06a07badae2f | 2,037 | py | Python | book/chap12/llf/setplot.py | geoflows/geoclaw-4.x | c8879d25405017b38392aa3b1ea422ff3e3604ea | [
"BSD-3-Clause"
] | 7 | 2016-11-13T03:11:51.000Z | 2021-09-07T18:59:48.000Z | book/chap12/llf/setplot.py | che-wenchao/D-Claw | 8ab5d971c9a7a7130e03a447a4b8642e292f4e88 | [
"BSD-3-Clause"
] | 11 | 2020-01-14T18:00:37.000Z | 2022-03-29T14:25:24.000Z | book/chap12/llf/setplot.py | che-wenchao/D-Claw | 8ab5d971c9a7a7130e03a447a4b8642e292f4e88 | [
"BSD-3-Clause"
] | 6 | 2020-01-14T17:15:42.000Z | 2021-12-03T17:28:44.000Z |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
plotdata.clearfigures() # clear any old figures,axes,items data
# Figure for q[0]
plotfigure = plotdata.new_plotfigure(name='q[0]', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-1.5, 2.5]
plotaxes.title = 'q[0]'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-o'
plotitem.color = 'b'
plotitem.show = True # show on plot?
def addtrue(current_data):
from pylab import plot
t = current_data.t
plot([-3, -t, 2*t, 3], [-1, -1, 2, 2])
plotaxes.afteraxes = addtrue
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
| 31.338462 | 74 | 0.607266 |
bee153af226a557cfa60eda1f35d3b328dc74fc9 | 1,503 | py | Python | Exercises/5.online_shop.py | GiorgosXonikis/Python-Sample-Code | 8d31444171138995f740128716e45b29f5e1f7a1 | [
"MIT"
] | null | null | null | Exercises/5.online_shop.py | GiorgosXonikis/Python-Sample-Code | 8d31444171138995f740128716e45b29f5e1f7a1 | [
"MIT"
] | null | null | null | Exercises/5.online_shop.py | GiorgosXonikis/Python-Sample-Code | 8d31444171138995f740128716e45b29f5e1f7a1 | [
"MIT"
] | null | null | null |
# 5.Online Shop
# Assume you have an online shop, and you have a list of orders.
# You don’t like that people order in small quantities, which is why you
# would like to add an additional CHF 10 to the total price, for all orders
# that are below CHF 100 in total.
# Write a short Python function compute_totals that returns a list of
# 2-tuples. Each tuple consists of the order number and the total price.
# The total price should be increased by CHF 10 if the total is less than
# CHF 100.
# TODO with filter() -- one possible variant is sketched at the end of this file
def compute_totals(input_list):
list_of_lists = []
output_list = []
# extract the id and multiple the value with quantity
for _dict in input_list:
list_of_lists.append([_dict['id'], _dict['quantity'] * _dict['price_per_item']])
# check if the total price is under 100 CHF
for _list in list_of_lists:
if _list[1] < 100:
_list[1] += 10
output_list.append(tuple(_list))
return output_list
orders = [
{
'id': 'order_001',
'item': 'Introduction to Python',
'quantity': 1,
'price_per_item': 32,
},
{
'id': 'order_002',
'item': 'Advanced Python',
'quantity': 3,
'price_per_item': 40,
},
{
'id': 'order_003',
'item': 'Python web frameworks',
'quantity': 2,
'price_per_item': 51,
},
]
totals = compute_totals(orders)
print(totals)
# totals is [('order_001', 42), ('order_002', 120), ('order_003', 102)]
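# A possible filter()-based variant, as hinted by the TODO above. This is a
# sketch, not the reference solution: it computes the same (id, total) tuples
# and uses filter() to pick out the orders whose total is under CHF 100.
def compute_totals_filter(input_list):
    totals = [(order['id'], order['quantity'] * order['price_per_item'])
              for order in input_list]
    # ids of the orders that get the CHF 10 small-order surcharge
    small = {order_id for order_id, total in filter(lambda t: t[1] < 100, totals)}
    return [(order_id, total + 10 if order_id in small else total)
            for order_id, total in totals]

assert compute_totals_filter(orders) == totals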
| 26.368421 | 88 | 0.624085 |
e33060eaa2dd7c46b30dcd3ca5f04cab05aecc5b | 1,697 | py | Python | EMU/ingest/census_example.py | DistrictDataLabs/03-EMU | 73c98a9bab74e8802c829a4391877372aa8e2475 | [
"Apache-2.0"
] | null | null | null | EMU/ingest/census_example.py | DistrictDataLabs/03-EMU | 73c98a9bab74e8802c829a4391877372aa8e2475 | [
"Apache-2.0"
] | null | null | null | EMU/ingest/census_example.py | DistrictDataLabs/03-EMU | 73c98a9bab74e8802c829a4391877372aa8e2475 | [
"Apache-2.0"
] | null | null | null | """
Some basic examples of how to use the Census API class...
"""
import census
"""
Before you use this, do the following:
1. There is a file called API.conf.SAMPLE; copy this file and rename it to API.conf
2. Move this file so it is in the EMU directory (NOT 03-EMU directory)
3. Copy and paste your Census API key in the spot. We will be using this file from now on to hide our API keys (not committing them to github).
"""
c = census.Census()
"""
Set your variables...
"""
# python dict of commonly used variables
print "Top level categories: ", c.acs_variables.keys()
econ_income = c.acs_variables['Economic']['income']['variable']
print "econ_income variable: ", econ_income
age_vars = []
for key,value in c.acs_variables["Age"].items():
print key
age_vars.append(value["variable"])
query_variables = age_vars + [econ_income]
print "\nAll variables to be queried: ", query_variables
"""
Set your locations...
"""
print "\nHow to access counties within a state: "
print c.state_county_codes.keys()
print c.state_county_codes["VA"]
print "Get a county code: ",
county_code = c.state_county_codes["VA"]["Fairfax"]
print county_code
"""
Query!
"""
response = c.get(query_variables, counties=[county_code], states=["VA"])
for r in response:
print r
#user defined variables
response_2 = c.get(["B01002_002E"], zip_codes=["20001"])
for r in response_2:
print r
#manual query, 'http://api.census.gov/data/' is provided..fill in the rest..
response_3 = c.manual_query("2011/acs5?&get=B01002_002E,NAME&for=zip+code+tabulation+area:20001")
for r in response_3:
print r
#all states...
response_4 = c.get(["B01002_002E"], states="*")
for r in response_4:
print r
| 24.594203 | 143 | 0.713023 |
37dda117c31be793a029ed058b7f664eab456170 | 4,969 | py | Python | tests/unit/tools/test_files.py | debexpo/debexpo | 866b0e61726b14425f02e10977398444785337be | [
"MIT"
] | 3 | 2016-02-17T08:47:07.000Z | 2017-05-20T20:43:23.000Z | tests/unit/tools/test_files.py | debexpo/debexpo | 866b0e61726b14425f02e10977398444785337be | [
"MIT"
] | 44 | 2015-01-11T00:54:24.000Z | 2019-08-17T21:06:25.000Z | tests/unit/tools/test_files.py | debexpo/debexpo | 866b0e61726b14425f02e10977398444785337be | [
"MIT"
] | 8 | 2015-01-12T00:57:32.000Z | 2017-01-20T23:01:52.000Z | # test_files.py - Unit testing for files helpers
#
# This file is part of debexpo -
# https://salsa.debian.org/mentors.debian.net-team/debexpo
#
# Copyright © 2008 Serafeim Zanikolas <serzan@hellug.gr>
# 2019 Baptiste Beauplat <lyknode@cilg.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Test cases for debexpo.tools.files
"""
from tests import TestController
from debexpo.tools.gnupg import ExceptionGnuPGNotSignedFile, \
ExceptionGnuPG, ExceptionGnuPGNoPubKey
from debexpo.tools.files import GPGSignedFile, CheckSumedFile, \
ExceptionCheckSumedFileNoFile, ExceptionCheckSumedFileFailedSum, \
ExceptionCheckSumedFileNoMethod
from debexpo.accounts.models import User
from debexpo.keyring.models import Key
from tests.unit.tools.test_gnupg import signed_file, test_gpg_key, \
test_gpg_key_fpr, test_gpg_key_algo, test_gpg_key_size, signed_file_v1, \
test_gpg1_key, test_gpg1_key_fpr, test_gpg1_key_algo, test_gpg1_key_size
class TestGPGSignedFileController(TestController):
def test_invalid_file(self):
gpg_file = GPGSignedFile('/noexistent')
self.assertRaises(ExceptionGnuPG, gpg_file.authenticate)
def test_plain_file(self):
gpg_file = GPGSignedFile('/etc/passwd')
self.assertRaises(ExceptionGnuPGNotSignedFile, gpg_file.authenticate)
def test_signed_with_unknown_key(self):
gpg_file = GPGSignedFile(signed_file)
self.assertRaises(ExceptionGnuPGNoPubKey, gpg_file.authenticate)
def test_signed_file_gpg1(self):
self.test_signed_file(test_gpg1_key, test_gpg1_key_fpr,
test_gpg1_key_algo, test_gpg1_key_size,
'rsa', signed_file_v1)
def test_signed_file(self, key=test_gpg_key, fpr=test_gpg_key_fpr,
algo=test_gpg_key_algo, size=test_gpg_key_size,
algo_str='ed25519', filename=signed_file):
# Setup user
user = User.objects.create_user('debexpo@example.org',
'debexpo testing', 'password')
user.save()
# Setup key
self._add_gpg_key(user, key, fpr, algo, size)
changes = GPGSignedFile(filename)
changes.authenticate()
self.assertEquals(changes.get_key(), Key.objects.get(user=user))
self.assertEquals(str(changes.get_key().algorithm), algo_str)
# Remove user and key
user.delete()
class TestCheckSumedFile(TestController):
def test_invalid_file(self):
sumed_file = CheckSumedFile('/noexistent')
sumed_file.add_checksum('sha256', '0')
try:
sumed_file.validate()
except ExceptionCheckSumedFileNoFile as e:
self.assertIn('noexistent', str(e))
def test_plain_file(self):
sumed_file = CheckSumedFile('/etc/passwd')
try:
sumed_file.validate()
except ExceptionCheckSumedFileNoMethod as e:
self.assertIn('passwd', str(e))
self.assertIn('No checksum method available', str(e))
def test_wrong_sumed_file(self):
sumed_file = CheckSumedFile('/etc/passwd')
sumed_file.add_checksum('sha256', '0')
try:
sumed_file.validate()
except ExceptionCheckSumedFileFailedSum as e:
self.assertIn('passwd', str(e))
self.assertIn('Checksum failed', str(e))
def test_bad_method_sumed_file(self):
sumed_file = CheckSumedFile('/etc/passwd')
sumed_file.add_checksum('this_hash_algo_does_not_exists', 0)
self.assertRaises(ExceptionCheckSumedFileNoMethod, sumed_file.validate)
def test_sumed_file(self):
sumed_file = CheckSumedFile('/dev/null')
sumed_file.add_checksum(
'sha256',
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
)
self.assertTrue(sumed_file.validate())
| 40.398374 | 79 | 0.697525 |
a237e0405e44788db074eb3442d48c269a240cf9 | 2,299 | py | Python | troposphere/secretsmanager.py | mansdahlstrom1/troposphere | deacf87ac179ee32bff5f07ee969d9c3dfebf10d | [
"BSD-2-Clause"
] | 1 | 2021-02-14T15:18:12.000Z | 2021-02-14T15:18:12.000Z | troposphere/secretsmanager.py | mansdahlstrom1/troposphere | deacf87ac179ee32bff5f07ee969d9c3dfebf10d | [
"BSD-2-Clause"
] | null | null | null | troposphere/secretsmanager.py | mansdahlstrom1/troposphere | deacf87ac179ee32bff5f07ee969d9c3dfebf10d | [
"BSD-2-Clause"
] | 5 | 2020-05-10T13:50:32.000Z | 2021-09-09T09:06:54.000Z | # Copyright (c) 2015, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .compat import policytypes
from .validators import integer, boolean
VALID_TARGET_TYPES = ('AWS::RDS::DBInstance', 'AWS::RDS::DBCluster')
def validate_target_types(target_type):
"""Target types validation rule."""
if target_type not in VALID_TARGET_TYPES:
raise ValueError("Target type must be one of : %s" %
", ".join(VALID_TARGET_TYPES))
return target_type
class ResourcePolicy(AWSObject):
resource_type = "AWS::SecretsManager::ResourcePolicy"
props = {
'SecretId': (basestring, True),
'ResourcePolicy': (policytypes, True),
}
class RotationRules(AWSProperty):
props = {
'AutomaticallyAfterDays': (integer, False),
}
class RotationSchedule(AWSObject):
resource_type = "AWS::SecretsManager::RotationSchedule"
props = {
'SecretId': (basestring, True),
'RotationLambdaARN': (basestring, True),
'RotationRules': (RotationRules, False)
}
class SecretTargetAttachment(AWSObject):
resource_type = "AWS::SecretsManager::SecretTargetAttachment"
props = {
'SecretId': (basestring, True),
'TargetId': (basestring, True),
'TargetType': (validate_target_types, True),
}
class GenerateSecretString(AWSProperty):
props = {
'ExcludeUppercase': (boolean, False),
'RequireEachIncludedType': (boolean, False),
'IncludeSpace': (boolean, False),
'ExcludeCharacters': (basestring, False),
'GenerateStringKey': (basestring, False),
'PasswordLength': (integer, False),
'ExcludePunctuation': (boolean, False),
'ExcludeLowercase': (boolean, False),
'SecretStringTemplate': (basestring, False),
'ExcludeNumbers': (boolean, False),
}
class Secret(AWSObject):
resource_type = "AWS::SecretsManager::Secret"
props = {
'Description': (basestring, False),
'KmsKeyId': (basestring, False),
'SecretString': (basestring, False),
'GenerateSecretString': (GenerateSecretString, False),
'Name': (basestring, False),
'Tags': ((Tags, list), False),
}
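if __name__ == "__main__":
    # A minimal usage sketch, not part of this module: attach a Secret with a
    # generated password to a troposphere Template. The resource title and
    # key names ("MyDatabaseSecret", "password") are illustrative assumptions.
    from troposphere import Template
    template = Template()
    template.add_resource(Secret(
        "MyDatabaseSecret",
        GenerateSecretString=GenerateSecretString(
            SecretStringTemplate='{"username": "admin"}',
            GenerateStringKey="password",
            PasswordLength=32,
        ),
    ))
    print(template.to_json())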
| 27.698795 | 68 | 0.644628 |
cdb161e109dcaed0ee04b8a490472ebef5fedd06 | 3,599 | py | Python | Trabalhos_Python/Fase_2/Iluminacao_prisma/prisma_iluminado.py | LucassAbm/Trabalhos_CompGrafica | 499ff1c33bcd5a49848fb76d1d6c40ee33d4b0b6 | [
"MIT"
] | null | null | null | Trabalhos_Python/Fase_2/Iluminacao_prisma/prisma_iluminado.py | LucassAbm/Trabalhos_CompGrafica | 499ff1c33bcd5a49848fb76d1d6c40ee33d4b0b6 | [
"MIT"
] | null | null | null | Trabalhos_Python/Fase_2/Iluminacao_prisma/prisma_iluminado.py | LucassAbm/Trabalhos_CompGrafica | 499ff1c33bcd5a49848fb76d1d6c40ee33d4b0b6 | [
"MIT"
] | 1 | 2022-03-14T02:48:39.000Z | 2022-03-14T02:48:39.000Z | from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import math
from math import *
import sys
a = 0
def calculaNormalFace(v0, v1, v2):
x = 0
y = 1
z = 2
U = (v2[x]-v0[x], v2[y]-v0[y], v2[z]-v0[z])
V = (v1[x]-v0[x], v1[y]-v0[y], v1[z]-v0[z])
N = ((U[y]*V[z]-U[z]*V[y]),(U[z]*V[x]-U[x]*V[z]),(U[x]*V[y]-U[y]*V[x]))
NLength = sqrt(N[x]*N[x]+N[y]*N[y]+N[z]*N[z])
return (N[x]/NLength, N[y]/NLength, N[z]/NLength)
def prisma():
raio = 2
N = 5
H = 4
modificador = 1
pontosBase = []
angulo = (2*math.pi)/N
glPushMatrix()
glTranslatef(0,-2,0)
glRotatef(a,0.0,1.0,0.0)
glRotatef(-110,1.0,0.0,0.0)
    # BASE
    # Pre-compute the base vertices so the face normal can be issued before
    # the vertices it applies to (glNormal only affects later vertices)
    for i in range(0,N):
        x = raio * math.cos(i*angulo)
        y = raio * math.sin(i*angulo)
        pontosBase += [ (x,y) ]
    glBegin(GL_POLYGON)
    u = (pontosBase[0][0], pontosBase[0][1], 0)
    v = (pontosBase[1][0], pontosBase[1][1], 0)
    p = (pontosBase[2][0], pontosBase[2][1], 0)
    glNormal3fv(calculaNormalFace(u,v,p))
    for x,y in pontosBase:
        glVertex3f(x,y,0.0)
    glEnd()
    # TOP
    glBegin(GL_POLYGON)
    u = (pontosBase[0][0], pontosBase[0][1], H)
    v = (pontosBase[1][0], pontosBase[1][1], H)
    p = (pontosBase[2][0], pontosBase[2][1], H)
    glNormal3fv(calculaNormalFace(u,v,p))
    for x,y in pontosBase:
        glVertex3f(modificador*x,modificador*y, H)
    glEnd()
# LATERAL
glBegin(GL_QUADS)
for i in range(0,N):
u = (pontosBase[i][0], pontosBase[i][1],0.0)
v = (modificador*pontosBase[i][0],modificador*pontosBase[i][1],H)
p = (modificador*pontosBase[(i+1)%N][0],modificador*pontosBase[(i+1)%N][1],H)
q = (pontosBase[(i+1)%N][0],pontosBase[(i+1)%N][1],0.0)
glNormal3fv(calculaNormalFace(u,v,q))
glVertex3fv(u)
glVertex3fv(v)
glVertex3fv(p)
glVertex3fv(q)
glEnd()
glPopMatrix()
def desenha():
global a
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
a+=1
prisma()
glutSwapBuffers()
def timer(i):
glutPostRedisplay()
glutTimerFunc(15,timer,1)
def reshape(w,h):
    glViewport(0,0,w,h)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()  # reset before applying a new projection on each reshape
    gluPerspective(45,float(w)/float(h),0.1,50.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(10,0,0,0,0,0,0,1,0)
def init():
mat_ambient = (0.25, 0.148, 0.06475, 1.0)
mat_diffuse = (0.4, 0.2368, 0.1036, 1.0)
mat_specular = (0.774597, 0.458561, 0.200621, 1.0)
mat_shininess = (76.8)
light_position = (300.0, 300.0, 300.0, 1.0)
glClearColor(0.0,0.0,0.0,0.0)
glShadeModel(GL_SMOOTH)
glMaterialfv(GL_FRONT, GL_AMBIENT, mat_ambient)
glMaterialfv(GL_FRONT, GL_DIFFUSE, mat_diffuse)
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular)
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_DEPTH_TEST)
glEnable(GL_MULTISAMPLE)
# MAIN PROGRAM
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)
glutInitWindowSize(800,600)
glutCreateWindow("Prisma Iluminado")
glutReshapeFunc(reshape)
glutDisplayFunc(desenha)
glEnable(GL_MULTISAMPLE)
glEnable(GL_DEPTH_TEST)
gluPerspective(45,800.0/600.0,0.1,100.0)
glTranslatef(0.0,0.0,-10)
glutTimerFunc(50,timer,1)
init()
glutMainLoop()
main() | 27.06015 | 86 | 0.589886 |
0995b06f369f13b47539de76dcc31a4570f7953f | 1,787 | py | Python | src/export_embeddings_artifacts.py | TimurDzhumakaev/retailhero-recommender-workspace | 419576ab47d0cbb1aa2d50e1d1ea17c71c04726d | [
"MIT"
] | 100 | 2020-01-09T10:46:26.000Z | 2022-03-24T08:25:40.000Z | src/export_embeddings_artifacts.py | TimurDzhumakaev/retailhero-recommender-workspace | 419576ab47d0cbb1aa2d50e1d1ea17c71c04726d | [
"MIT"
] | 1 | 2020-09-28T05:25:32.000Z | 2020-09-28T05:25:32.000Z | src/export_embeddings_artifacts.py | TimurDzhumakaev/retailhero-recommender-workspace | 419576ab47d0cbb1aa2d50e1d1ea17c71c04726d | [
"MIT"
] | 26 | 2020-01-20T08:39:04.000Z | 2022-01-18T08:46:17.000Z | # if you are confused with `artifact` in filename, please read
# https://english.stackexchange.com/questions/37903/difference-between-artifact-and-artefact
import glob
import json
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from scipy import sparse as sp
from torch import nn
from tqdm import tqdm
import config as cfg
from nn_models import ItemModel, UserModel
from utils import ProductEncoder, TrainingSample, coo_to_pytorch_sparse, normalized_average_precision
if __name__ == "__main__":
product_encoder = ProductEncoder(cfg.PRODUCT_CSV_PATH)
dim = 128
input_dir = "../tmp/embds_d{}/".format(dim)
output_dir = "../artifacts/embds_d{}/".format(dim)
os.makedirs(output_dir, exist_ok=True)
# load models
user_model = UserModel(product_encoder.num_products, dim)
item_model = ItemModel(product_encoder.num_products, dim)
user_model.load_state_dict(torch.load(input_dir + "/user_model.pth"))
item_model.load_state_dict(torch.load(input_dir + "/item_model.pth"))
# conver user model to cpu
user_model = user_model.cpu()
torch.save(user_model.state_dict(), output_dir + "/user_model_cpu.pth")
# export normalized item vectors
item_vectors = item_model._embeds.weight.data.cpu().numpy()
item_vectors /= np.linalg.norm(item_vectors, axis=1, keepdims=True)
np.save(output_dir + "/item_vectors.npy", item_vectors)
# export knn index (compression and speed-up by FAISS, with Inner Product as distance)
import faiss
quantizer = faiss.IndexFlatIP(dim)
index = faiss.IndexIVFPQ(quantizer, dim, 128, 16, 8)
index.train(item_vectors)
index.add(item_vectors)
faiss.write_index(index, output_dir + "/knn.idx")
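    # A hedged usage sketch: read the index back and query it with a user
    # embedding. `query` is a stand-in assumption for whatever UserModel
    # produces at serving time; here we just reuse the first item vector.
    index = faiss.read_index(output_dir + "/knn.idx")
    index.nprobe = 8  # probe more IVF cells to trade speed for recall
    query = item_vectors[:1]
    scores, neighbors = index.search(query, 10)
    print(scores.shape, neighbors.shape)  # (1, 10) (1, 10)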
| 33.092593 | 101 | 0.74986 |
82af6901499a848e02d16713436739fbe5b423e9 | 4,539 | py | Python | mldock/api/registry.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 2 | 2021-07-12T13:51:21.000Z | 2021-07-19T08:40:02.000Z | mldock/api/registry.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 41 | 2021-06-28T11:05:20.000Z | 2022-03-13T13:48:50.000Z | mldock/api/registry.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 1 | 2021-07-17T19:07:06.000Z | 2021-07-17T19:07:06.000Z | """Registry API methods"""
import logging
import click
import docker
logger = logging.getLogger("mldock")
def compute_progress(current, max_value=10):
"""
compute the progress bar update
args:
current (int): current count of progress bar
max_value (int): (default=10) maximum length of progress bar
returns:
        (int): progress counter, wrapped back into [0, max_value]
"""
if current > max_value:
multiple = current // max_value
current = current - max_value * multiple
return current
def get_layer_state(stream: str, layer_id: str, state: dict = None, **kwargs):
"""
    Computes a status update to the state of a layer during a push or pull.
    Additionally, renders a progress bar with a configurable fill character.
args:
stream (str): stream message
layer_id (str): stream layer id
state (dict) (default=None) current state
kwargs:
progress (str): (default = '') starting progress bar string
fill_char (str): (default='=') fill char to use as progress bar
increment (str): (default=2) progress bar char increment to use
max_value (str): (default=40) maximum progress bar length
status_tag (str): (default='pushing') key to use to look for updates
"""
progress = kwargs.get("progress", "")
fill_char = kwargs.get("fill_char", "=")
increment = kwargs.get("increment", 2)
max_value = kwargs.get("max_value", 40)
status_tag = kwargs.get("status_tag", "pushing")
    if state is None:
        state = {}
    try:
        # only the stream matching status_tag (e.g. "Pushing" or
        # "Downloading") grows the layer's progress bar; every stream
        # still refreshes the layer's message below
        if stream.lower() == status_tag.lower():
            current_layer = state[layer_id]
            current_progress = (
                current_layer["progress"]
                .replace("[", "")
                .replace("]", "")
                .replace(">", "")
            )
            current_progress = len(current_progress)
            progress = (
                compute_progress(current_progress, max_value=max_value) + increment
            )
            progress = fill_char * progress
            progress = "[{}>]".format(progress)
        state.update({layer_id: {"message": stream, "progress": progress}})
    except KeyError:
        pass
    return state
def stateful_log_emitter(line: dict, status_tag: str, states: dict = None):
"""
stateful log emitter
args:
line (dict): current record of logs
status_tag (str): key to use to update state
states (dict): current state of logs
"""
if states is None:
states = {}
error = line.get("error", None)
error_detail = line.get("errorDetail", None)
if error is not None:
logger.error("{}\n{}".format(error, error_detail))
stream = line.get("status", "")
layer_id = line.get("id", "")
progress = line.get("progress", "")
# perform status update, using layer_id as key
states = get_layer_state(
stream, layer_id, states, progress=progress, status_tag=status_tag
)
# clear terminal to allow for friendlier update
click.clear()
# emit current state to stdout
for key, value in states.items():
logger.info(
"{KEY}: {MESSAGE} {PROGRESS}".format(
KEY=key, MESSAGE=value["message"], PROGRESS=value["progress"]
)
)
return states
def push_image_to_repository(image_repository: str, auth_config: dict, tag="latest"):
"""
Push image to repository in registry. Using auth_config this will Authenticate client.
"""
client = docker.from_env()
push_response = client.images.push(
repository=image_repository,
tag=tag,
stream=True,
decode=True,
auth_config=auth_config,
)
states = {}
for line in push_response:
states = stateful_log_emitter(line=line, states=states, status_tag="Pushing")
return states
def pull_image_from_repository(image_repository: str, auth_config: dict, tag="latest"):
"""
Pull image from repository in registry. Using auth_config this will Authenticate client.
"""
client = docker.from_env()
pull_response = client.api.pull(
repository=image_repository,
tag=tag,
stream=True,
decode=True,
auth_config=auth_config,
)
states = {}
for line in pull_response:
states = stateful_log_emitter(
line=line, states=states, status_tag="Downloading"
)
return states
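if __name__ == "__main__":
    # A minimal usage sketch, not part of the module API: the repository name
    # and credentials below are illustrative assumptions. `auth_config` is the
    # plain dict docker-py expects ({"username": ..., "password": ...}).
    demo_auth = {"username": "me", "password": "secret"}
    push_image_to_repository(
        "registry.example.com/my/image", demo_auth, tag="v1"
    )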
| 29.474026 | 92 | 0.604318 |
db6377516e714c33a67d8498493b5a1ef0ad3066 | 6,364 | py | Python | salt/returners/__init__.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | salt/returners/__init__.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | 1 | 2018-08-23T18:25:42.000Z | 2018-08-23T18:25:42.000Z | salt/returners/__init__.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | 1 | 2018-04-19T16:57:27.000Z | 2018-04-19T16:57:27.000Z | # -*- coding: utf-8 -*-
'''
Returners Directory
:func:`get_returner_options` is a general purpose function that returners may
use to fetch their configuration options.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
def get_returner_options(virtualname=None,
ret=None,
attrs=None,
**kwargs):
'''
Get the returner options from salt.
:param str virtualname: The returner virtualname (as returned
by __virtual__()
:param ret: result of the module that ran. dict-like object
        May contain a `ret_config` key pointing to a string.
        If a `ret_config` is specified, config options are read from::
            ret_config.virtualname.option
        If not, config options are read from::
            virtualname.option
:param attrs: options the returner wants to read
:param __opts__: Optional dict-like object that contains a fallback config
in case the param `__salt__` is not supplied.
Defaults to empty dict.
:param __salt__: Optional dict-like object that exposes the salt API.
Defaults to empty dict.
a) if __salt__ contains a 'config.option' configuration options,
we infer the returner is being called from a state or module run ->
config is a copy of the `config.option` function
b) if __salt__ was not available, we infer that the returner is being
called from the Salt scheduler, so we look for the
configuration options in the param `__opts__`
-> cfg is a copy for the __opts__ dictionary
:param str profile_attr: Optional.
If supplied, an overriding config profile is read from
the corresponding key of `__salt__`.
:param dict profile_attrs: Optional
.. fixme:: only keys are read
        For each key in profile_attrs, the profile dict (located via
        the param `profile_attr`) is used to fetch the value pointed
        to by 'virtualname.<key>'
'''
ret_config = _fetch_ret_config(ret)
attrs = attrs or {}
profile_attr = kwargs.get('profile_attr', None)
profile_attrs = kwargs.get('profile_attrs', None)
defaults = kwargs.get('defaults', None)
__salt__ = kwargs.get('__salt__', {})
__opts__ = kwargs.get('__opts__', {})
# select the config source
cfg = __salt__.get('config.option', __opts__)
# browse the config for relevant options, store them in a dict
_options = dict(
_options_browser(
cfg,
ret_config,
defaults,
virtualname,
attrs,
)
)
# override some values with relevant profile options
_options.update(
_fetch_profile_opts(
cfg,
virtualname,
__salt__,
_options,
profile_attr,
profile_attrs
)
)
# override some values with relevant options from
# keyword arguments passed via return_kwargs
if ret and 'ret_kwargs' in ret:
_options.update(ret['ret_kwargs'])
return _options
def _fetch_ret_config(ret):
"""
Fetches 'ret_config' if available.
@see :func:`get_returner_options`
"""
if not ret:
return None
if 'ret_config' not in ret:
return ''
return str(ret['ret_config'])
def _fetch_option(cfg, ret_config, virtualname, attr_name):
"""
Fetch a given option value from the config.
@see :func:`get_returner_options`
"""
# c_cfg is a dictionary returned from config.option for
# any options configured for this returner.
if isinstance(cfg, dict):
c_cfg = cfg
else:
c_cfg = cfg('{0}'.format(virtualname), {})
default_cfg_key = '{0}.{1}'.format(virtualname, attr_name)
if not ret_config:
# Using the default configuration key
if isinstance(cfg, dict):
return c_cfg.get(attr_name, cfg.get(default_cfg_key))
else:
return c_cfg.get(attr_name, cfg(default_cfg_key))
# Using ret_config to override the default configuration key
ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {})
override_default_cfg_key = '{0}.{1}.{2}'.format(
ret_config,
virtualname,
attr_name,
)
override_cfg_default = cfg(override_default_cfg_key)
# Look for the configuration item in the override location
ret_override_cfg = ret_cfg.get(
attr_name,
override_cfg_default
)
if ret_override_cfg:
return ret_override_cfg
# if not configuration item found, fall back to the default location.
return c_cfg.get(attr_name, cfg(default_cfg_key))
def _options_browser(cfg, ret_config, defaults, virtualname, options):
"""
Iterator generating all duples ```option name -> value```
@see :func:`get_returner_options`
"""
for option in options:
# default place for the option in the config
value = _fetch_option(cfg, ret_config, virtualname, options[option])
if value:
yield option, value
continue
# Attribute not found, check for a default value
if defaults:
if option in defaults:
log.info('Using default for %s %s', virtualname, option)
yield option, defaults[option]
continue
# fallback (implicit else for all ifs)
yield option, ''
def _fetch_profile_opts(
cfg, virtualname,
__salt__,
_options,
profile_attr,
profile_attrs
):
"""
Fetches profile specific options if applicable
@see :func:`get_returner_options`
:return: a options dict
"""
if (not profile_attr) or (profile_attr not in _options):
return {}
# Using a profile and it is in _options
creds = {}
profile = _options[profile_attr]
if profile:
log.info('Using profile %s', profile)
if 'config.option' in __salt__:
creds = cfg(profile)
else:
creds = cfg.get(profile)
if not creds:
return {}
return dict(
(
pattr,
creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr]))
)
for pattr in profile_attrs
)
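# A hedged usage sketch (commented out because __salt__/__opts__ only exist
# inside a loaded returner module). A returner whose __virtualname__ is
# 'myreturner' might resolve its options like this; the option names are
# illustrative assumptions:
#
#     def _get_options(ret=None):
#         attrs = {'host': 'host', 'port': 'port'}
#         defaults = {'port': 6379}
#         return get_returner_options('myreturner',
#                                     ret,
#                                     attrs,
#                                     __salt__=__salt__,
#                                     __opts__=__opts__,
#                                     defaults=defaults)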
| 27.431034 | 79 | 0.62335 |
8a8451eae87aa7d497d3b253cd91dcb2a295e8ae | 404 | py | Python | protocol_controller/plugins/cbsd_sas/tests/fixtures/fake_requests/heartbeat_requests.py | magma/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | null | null | null | protocol_controller/plugins/cbsd_sas/tests/fixtures/fake_requests/heartbeat_requests.py | magma/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | 298 | 2021-03-31T19:29:45.000Z | 2022-03-31T11:30:44.000Z | protocol_controller/plugins/cbsd_sas/tests/fixtures/fake_requests/heartbeat_requests.py | openEPC/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | 5 | 2021-03-31T09:26:37.000Z | 2021-03-31T21:34:16.000Z | heartbeat_requests = [
{
"heartbeatRequest": [
{
"cbsdId": "foo",
"grantId": "foo1",
"operationState": "foo2"
}
]
},
{
"heartbeatRequest": [
{
"cbsdId": "bar",
"grantId": "bar1",
"operationState": "bar2"
}
]
}
]
| 19.238095 | 40 | 0.311881 |
eb81fb2a766050b3a06c54b9ba685f7c3c857462 | 2,010 | py | Python | cycada/util.py | yeeyangtee/cycada_release | 17d7207e27b9a93d477741f73b7a69525148120d | [
"BSD-2-Clause"
] | null | null | null | cycada/util.py | yeeyangtee/cycada_release | 17d7207e27b9a93d477741f73b7a69525148120d | [
"BSD-2-Clause"
] | null | null | null | cycada/util.py | yeeyangtee/cycada_release | 17d7207e27b9a93d477741f73b7a69525148120d | [
"BSD-2-Clause"
] | null | null | null | import logging
import logging.config
import os.path
from collections import OrderedDict
import numpy as np
import torch
import yaml
from torch.nn.parameter import Parameter
from tqdm import tqdm
class TqdmHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
def config_logging(logfile=None):
path = os.path.join(os.path.dirname(__file__), 'logging.yml')
with open(path, 'r') as f:
config = yaml.safe_load(f.read())
if logfile is None:
del config['handlers']['file_handler']
del config['root']['handlers'][-1]
else:
config['handlers']['file_handler']['filename'] = logfile
logging.config.dictConfig(config)
def to_tensor_raw(im):
return torch.from_numpy(np.array(im, np.int64, copy=False))
def safe_load_state_dict(net, state_dict):
"""Copies parameters and buffers from :attr:`state_dict` into
this module and its descendants. Any params in :attr:`state_dict`
that do not match the keys returned by :attr:`net`'s :func:`state_dict()`
method or have differing sizes are skipped.
Arguments:
state_dict (dict): A dict containing parameters and
persistent buffers.
"""
own_state = net.state_dict()
skipped = []
for name, param in state_dict.items():
if name not in own_state:
skipped.append(name)
continue
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
if own_state[name].size() != param.size():
skipped.append(name)
continue
own_state[name].copy_(param)
if skipped:
logging.info('Skipped loading some parameters: {}'.format(skipped))
def step_lr(optimizer, mult):
for param_group in optimizer.param_groups:
lr = param_group['lr']
param_group['lr'] = lr * mult
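if __name__ == "__main__":
    # A minimal self-check sketch (synthetic modules, assumed shapes): the
    # second module's weights have a different size, so safe_load_state_dict
    # skips them and logs the skipped keys instead of raising.
    logging.basicConfig(level=logging.INFO)
    src = torch.nn.Linear(4, 2)
    dst = torch.nn.Linear(4, 3)
    safe_load_state_dict(dst, src.state_dict())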
| 28.714286 | 77 | 0.659204 |
6925ee76538c1a5bf435f982119ac56d518f765b | 3,250 | py | Python | source/python/backup/database.py | vkushnir/docker_syslog-ng | 7f5632231c1884a90a272a4c002448ca9133aa02 | [
"MIT"
] | null | null | null | source/python/backup/database.py | vkushnir/docker_syslog-ng | 7f5632231c1884a90a272a4c002448ca9133aa02 | [
"MIT"
] | null | null | null | source/python/backup/database.py | vkushnir/docker_syslog-ng | 7f5632231c1884a90a272a4c002448ca9133aa02 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
""" DATABASE """
__all__ = ['database']
# Import required python libraries
import os, sys
import sqlite3
from syslog.utils import sprint, eprint, str_to_bool
count = 0
folder = '/var/sqlite'
file = os.getenv('DB_FILE')
path = os.path.join(folder, file)
in_memory = str_to_bool(os.getenv('DB_MEMORY'))
if in_memory:
database = ":memory:"
else:
database = path
save_on_exit = str_to_bool(os.getenv('DB_SAVE'))
# Init database
if not os.path.exists(folder):
os.makedirs(folder)
try:
with sqlite3.connect(database) as sql:
sql.execute("CREATE TABLE IF NOT EXISTS queue(ip TEXT PRIMARY KEY, lock BOOL DEFAULT 0, hits INTEGER DEFAULT 0, date DATETIME);")
sql.execute("CREATE TABLE IF NOT EXISTS auth(ip TEXT PRIMARY KEY, snmp_version TEXT, snmp_community TEXT, snmp_aprot TEXT, snmp_apass TEXT, snmp_seng_id TEXT, snmp_ceng_id TEXT, snmp_level TEXT, snmp_context TEXT, snmp_user TEXT, snmp_pprot TEXT, snmp_ppass TEXT, snmp_boots TEXT);")
sql.execute("CREATE TABLE IF NOT EXISTS servers(ip TEXT PRIMARY KEY, type TEXT, addr TEXT, path TEXT)")
sql.execute("CREATE TABLE IF NOT EXISTS vendors(ip TEXT PRIMARY KEY, sysObjectID TEXT, app TEXT, model TEXT, vendor TEXT, hits INTEGER, last DATETIME);")
except:
    eprint("can't create database", sys.exc_info()[0])
raise
def clear_locks():
with sqlite3.connect(database) as sql:
sql.execute("UPDATE queue SET lock=0")
def lock(ip):
try:
with sqlite3.connect(database) as sql:
sql.execute("INSERT INTO queue(ip, lock) values(?, 1)", (ip, ))
return True
except:
with sqlite3.connect(database) as sql:
sql.execute("UPDATE queue SET lock=1, hits=hits+1 WHERE ip=?", (ip, ))
eprint("device '"+ip+"' already in list")
return False
def release(ip):
with sqlite3.connect(database) as sql:
sql.execute("UPDATE OR IGNORE queue SET lock=0, hits=0 WHERE ip=?", (ip, ))
def clear(ip):
with sqlite3.connect(database) as sql:
sql.execute("DELETE FROM queue WHERE ip=?", (ip, ))
def update_vendor_oid(ip, oid):
with sqlite3.connect(database) as sql:
sql.execute("UPDATE OR IGNORE vendors SET sysObjectID=?, app=NULL, model=NULL, vendor=NULL WHERE ip=?", (oid, ip))
sql.execute("INSERT OR IGNORE INTO vendors(sysObjectID, ip) values(?, ?)", (oid, ip))
def get_auth(ip):
with sqlite3.connect(database) as sql:
sql.row_factory = sqlite3.Row
cur = sql.cursor()
cur.execute("SELECT * FROM auth WHERE ip=?", (ip, ))
row = cur.fetchone()
return row
def load(src):
    if in_memory:
        try:
            with sqlite3.connect(database) as sql:
                # sqlite3 allows only one statement per execute() call, so
                # attach the source database once and copy each table with
                # its own statement
                sql.execute("ATTACH DATABASE ? AS source", (src,))
                sql.execute("INSERT OR FAIL INTO main.queue SELECT * FROM source.queue")
                sql.execute("INSERT OR FAIL INTO main.auth SELECT * FROM source.auth")
                sql.execute("INSERT OR FAIL INTO main.vendors SELECT * FROM source.vendors")
        except:
            eprint("can't transfer database from source", sys.exc_info()[0])
def save():
    pass
# Populate the in-memory database from the on-disk copy, now that load()
# is defined
if in_memory:
    load(path)
| 37.790698 | 291 | 0.661538 |
8ee13cd4319f0d5edc6259af4ad0b176ee90068f | 520 | py | Python | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_pattern_2_xsd/nistschema_sv_iv_list_unsigned_byte_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_pattern_2_xsd/nistschema_sv_iv_list_unsigned_byte_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_pattern_2_xsd/nistschema_sv_iv_list_unsigned_byte_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-unsignedByte-pattern-2-NS"
@dataclass
class NistschemaSvIvListUnsignedBytePattern2:
class Meta:
name = "NISTSchema-SV-IV-list-unsignedByte-pattern-2"
namespace = "NISTSchema-SV-IV-list-unsignedByte-pattern-2-NS"
value: List[str] = field(
default_factory=list,
metadata={
"pattern": r"\d{1} \d{2} \d{3} \d{1} \d{2}",
"tokens": True,
}
)
| 26 | 69 | 0.638462 |
88f3b2d76c5a9f7fd876493bc8ddafe86b510178 | 1,884 | py | Python | src/affinity_matrix.py | vaishnavi-sridhar/CU_HIN | 686a0cf883fbface41acdcdeba44eb8805bb74e1 | [
"MIT"
] | 2 | 2021-02-24T21:32:33.000Z | 2021-06-08T19:29:56.000Z | src/affinity_matrix.py | vaishnavi-sridhar/CU_HIN | 686a0cf883fbface41acdcdeba44eb8805bb74e1 | [
"MIT"
] | 1 | 2021-04-10T08:39:09.000Z | 2021-04-10T08:39:09.000Z | src/affinity_matrix.py | vaishnavi-sridhar/CU_HIN | 686a0cf883fbface41acdcdeba44eb8805bb74e1 | [
"MIT"
] | 4 | 2021-03-03T04:38:45.000Z | 2022-03-01T22:35:48.000Z | from scipy.sparse import lil_matrix, csr_matrix, dia_matrix
import numpy as np
import math
import logging
def affinity_matrix(M: csr_matrix, threshold: float) -> csr_matrix:
""" Computes the affinity matrix for matrix M. Each row is compared to each
other row and a gaussian is used to calculate how close the rows are.
If the value is below a threshold, we set it to zero (which won't
be represented in the sparse matrix)
Arguments:
M: csr - The matrix to compute affinity on.
threshold: float - The threshold to use to zero out small values
Returns:
csr_matrix
"""
assert(M.shape[0] == M.shape[1]),("Expected square matrix")
assert(len(M.shape) == 2),("Expected square matrix")
n = M.shape[0]
A = lil_matrix((n,n))
for i in range(n):
mi = M[i,:].toarray()
for j in range(i):
if i != j:
x = mi - M[j,:].toarray()
x = np.linalg.norm(x, ord=2)
y = math.exp(-pow(x,2))
if y > threshold:
A[i,j] = y
A[j,i] = y
return A.tocsr()
def converge(M, Y, mu, tol):
    """ Label propagation: normalizes M into S = D^-1/2 * M * D^-1/2 and
    iterates F = alpha*S*F + beta*Y (alpha = 1/(1+mu), beta = mu/(1+mu))
    until the change in F drops below tol. Returns the converged F.
    """
assert(M.shape[0] == M.shape[1]),("Expected square matrix")
assert(len(M.shape) == 2),("Expected square matrix")
n = M.shape[0]
data = np.squeeze(np.asarray(M.sum(axis=1))) # sum the rows
offsets = np.array([0])
D = dia_matrix((data, offsets), shape=(n,n))
D = D.tocsr() # Convert to csr because can't use subscripting
for i in range(n):
D[i,i] = D[i,i] ** (-1/2)
S = D * M * D
alpha = 1/(1 + mu)
beta = mu/(1 + mu)
delta=tol+1 #Initializes change as larger than the tolerance to enter while loop.
F = Y
while delta>tol:
F0=F #Stores previous value of F for computation.
F = alpha * S.dot(F) + beta * Y
delta=np.linalg.norm(F-F0) #Computes magnitude of change from previous step.
logging.info("Tol " + str(tol) + " Delta " + str(delta))
return F
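if __name__ == "__main__":
    # A small self-contained sketch with synthetic data and assumed
    # parameters: build the affinity graph for four one-hot-ish rows,
    # then propagate the label of row 0 to the others with converge().
    logging.basicConfig(level=logging.INFO)
    M = csr_matrix(np.eye(4) + 0.5)
    A = affinity_matrix(M, threshold=0.01)
    Y = np.zeros((4, 2))
    Y[0, 0] = 1.0  # row 0 carries the only known label
    F = converge(A, Y, mu=0.5, tol=1e-6)
    print(F)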
| 26.535211 | 83 | 0.618365 |
835f3c12327c66cacc06cb414cdba1c313ba6da7 | 377 | py | Python | tests/test_longest_valid_parentheses.py | stachenov/PyLeetCode | cb13700d428854eff46a762542a63d691578d5b6 | [
"Unlicense"
] | null | null | null | tests/test_longest_valid_parentheses.py | stachenov/PyLeetCode | cb13700d428854eff46a762542a63d691578d5b6 | [
"Unlicense"
] | null | null | null | tests/test_longest_valid_parentheses.py | stachenov/PyLeetCode | cb13700d428854eff46a762542a63d691578d5b6 | [
"Unlicense"
] | null | null | null | import pytest
from problems.longest_valid_parentheses import Solution
@pytest.mark.parametrize("s, expected", [
("", 0),
(")(", 0),
("()", 2),
("(())", 4),
("()()", 4),
("(()()", 4),
(")()()", 4),
("()())()", 4),
("()(())", 6),
])
def test_longestValidParentheses(s, expected):
assert Solution().longestValidParentheses(s) == expected
| 20.944444 | 60 | 0.498674 |
fac59c03dc212aee154818f10cf7e1638716bbf4 | 21,806 | py | Python | bw2io/extractors/simapro_csv.py | brightway-lca/brightway2-io-copy | 8383adc2f0cb06852f689fb2aab62d5a29f41130 | [
"BSD-3-Clause"
] | null | null | null | bw2io/extractors/simapro_csv.py | brightway-lca/brightway2-io-copy | 8383adc2f0cb06852f689fb2aab62d5a29f41130 | [
"BSD-3-Clause"
] | 3 | 2020-03-10T11:08:18.000Z | 2020-03-10T11:09:00.000Z | bw2io/extractors/simapro_csv.py | brightway-lca/brightway2-io-copy | 8383adc2f0cb06852f689fb2aab62d5a29f41130 | [
"BSD-3-Clause"
] | null | null | null | import csv
import math
import os
import re
import uuid
from numbers import Number
from bw2data.logs import close_log, get_io_logger
from bw2parameters import ParameterSet
from stats_arrays import (
LognormalUncertainty,
NormalUncertainty,
TriangularUncertainty,
UndefinedUncertainty,
UniformUncertainty,
)
from ..compatibility import SIMAPRO_BIOSPHERE
from ..strategies.simapro import normalize_simapro_formulae
INTRODUCTION = """Starting SimaPro import:
\tFilepath: %s
\tDelimiter: %s
\tName: %s
"""
SIMAPRO_TECHNOSPHERE = {
"Avoided products",
"Electricity/heat",
"Materials/fuels",
"Waste to treatment",
}
SIMAPRO_PRODUCTS = {"Products", "Waste treatment"}
SIMAPRO_END_OF_DATASETS = {
"Database Calculated parameters",
"Database Input parameters",
"Literature reference",
"Project Input parameters",
"Project Calculated parameters",
"Quantities",
"Units",
}
class EndOfDatasets(Exception):
pass
def to_number(obj):
try:
return float(obj.replace(",", ".").strip())
except (ValueError, SyntaxError):
# Sometimes allocation or ref product specific as percentage
if "%" in obj:
return float(obj.replace("%", "").strip()) / 100.0
try:
# Eval for simple expressions like "1/2"
return float(eval(obj.replace(",", ".").strip()))
except NameError:
# Formula with a variable which isn't in scope - raises NameError
return obj
except SyntaxError:
# Unit string like "ha a" raises a syntax error when evaled
return obj
except TypeError:
# Formulas with parameters or units that are Python built-in function like "min" (can be a parameter or a unit) raises TypeError
return obj
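# A few illustrative to_number() inputs (a sketch, not exhaustive):
#   to_number("1,5")  -> 1.5     (decimal comma normalized)
#   to_number("50%")  -> 0.5     (allocation given as a percentage)
#   to_number("1/2")  -> 0.5     (simple expression, eval'ed)
#   to_number("ha a") -> "ha a"  (unit string returned unchanged)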
# \x7f is the ASCII delete character - where does it come from?
strip_whitespace_and_delete = (
lambda obj: obj.replace("\x7f", "").strip() if isinstance(obj, str) else obj
)
uppercase_expression = (
"(?:" # Don't capture this group
"^" # Match the beginning of the string
"|" # Or
"[^a-zA-Z_])" # Anything other than a letter or underscore. SimaPro is limited to ASCII characters
"(?P<variable>{})" # The variable name string will be substituted here
"(?:[^a-zA-Z_]|$)" # Match anything other than a letter or underscore, or the end of the line
)
def replace_with_uppercase(string, names, precompiled):
"""Replace all occurrences of elements of ``names`` in ``string`` with their uppercase equivalents.
``names`` is a list of variable name strings that should already all be uppercase.
Returns a modified ``string``."""
for name in names:
for result in precompiled[name].findall(string):
string = string.replace(result, name)
return string
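# Illustrative sketch: with names = ("DB_PARAM",) and its precompiled pattern,
# replace_with_uppercase("db_param * 2", ...) returns "DB_PARAM * 2". The
# pattern only fires when the name stands alone, so an identifier such as
# "my_db_param_x" is left untouched.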
class SimaProCSVExtractor(object):
@classmethod
def extract(cls, filepath, delimiter=";", name=None, encoding="cp1252"):
assert os.path.exists(filepath), "Can't find file %s" % filepath
log, logfile = get_io_logger("SimaPro-extractor")
log.info(
INTRODUCTION
% (
filepath,
repr(delimiter),
name,
)
)
with open(filepath, "r", encoding=encoding) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
lines = [
[strip_whitespace_and_delete(obj) for obj in line] for line in reader
]
# Check if valid SimaPro file
assert (
"SimaPro" in lines[0][0] or "CSV separator" in lines[0][0]
), "File is not valid SimaPro export"
project_name = name or cls.get_project_name(lines)
datasets = []
project_metadata = cls.get_project_metadata(lines)
global_parameters, global_precompiled = cls.get_global_parameters(
lines, project_metadata
)
index = cls.get_next_process_index(lines, 0)
while True:
try:
ds, index = cls.read_data_set(
lines,
index,
project_name,
filepath,
global_parameters,
project_metadata,
global_precompiled,
)
datasets.append(ds)
index = cls.get_next_process_index(lines, index)
except EndOfDatasets:
break
close_log(log)
return datasets, global_parameters, project_metadata
@classmethod
def get_next_process_index(cls, data, index):
while True:
try:
if data[index] and data[index][0] in SIMAPRO_END_OF_DATASETS:
raise EndOfDatasets
elif data[index] and data[index][0] == "Process":
return index + 1
except IndexError:
# File ends without extra metadata
raise EndOfDatasets
index += 1
@classmethod
def get_project_metadata(cls, data):
meta = {}
for line in data:
if not line:
return meta
elif ":" not in line[0]:
continue
if not len(line) == 1:
raise ValueError("Can't understand metadata line {}".format(line))
assert line[0][0] == "{" and line[0][-1] == "}"
line = line[0][1:-1].split(":")
key, value = line[0], ":".join(line[1:])
meta[key.strip()] = value.strip()
@classmethod
def get_global_parameters(cls, data, pm):
current, parameters = None, []
for line in data:
if not line: # Blank line, end of section
current = None
elif line[0] in {"Database Input parameters", "Project Input parameters"}:
current = "input"
elif line[0] in {
"Database Calculated parameters",
"Project Calculated parameters",
}:
current = "calculated"
elif current is None:
continue
elif current == "input":
parameters.append(cls.parse_input_parameter(line))
elif current == "calculated":
parameters.append(cls.parse_calculated_parameter(line, pm))
else:
raise ValueError("This should never happen")
# Extract name and uppercase
parameters = {obj.pop("name").upper(): obj for obj in parameters}
global_precompiled = {
name: re.compile(uppercase_expression.format(name), flags=re.IGNORECASE)
for name in parameters
}
# Change all formula values to uppercase if referencing global parameters
for obj in parameters.values():
if "formula" in obj:
obj["formula"] = replace_with_uppercase(
obj["formula"], parameters, global_precompiled
)
ParameterSet(parameters).evaluate_and_set_amount_field()
return parameters, global_precompiled
@classmethod
def get_project_name(cls, data):
for line in data[:25]:
if not line:
continue
elif "{Project:" in line[0]:
return line[0][9:-1].strip()
# What the holy noodly appendage
# All other metadata in English, only this term
# translated into French‽
elif "{Projet:" in line[0]:
return line[0][9:-1].strip()
@classmethod
def invalid_uncertainty_data(cls, amount, kind, field1, field2, field3):
if kind == "Lognormal" and (not amount or field1 == "0"):
return True
@classmethod
def create_distribution(cls, amount, kind, field1, field2, field3):
amount = to_number(amount)
if kind == "Undefined":
return {
"uncertainty type": UndefinedUncertainty.id,
"loc": amount,
"amount": amount,
}
elif cls.invalid_uncertainty_data(amount, kind, field1, field2, field3):
# TODO: Log invalid data?
return {
"uncertainty type": UndefinedUncertainty.id,
"loc": amount,
"amount": amount,
}
elif kind == "Lognormal":
return {
"uncertainty type": LognormalUncertainty.id,
"scale": math.log(math.sqrt(to_number(field1))),
"loc": math.log(abs(amount)),
"negative": amount < 0,
"amount": amount,
}
elif kind == "Normal":
return {
"uncertainty type": NormalUncertainty.id,
"scale": math.sqrt(to_number(field1)),
"loc": amount,
"negative": amount < 0,
"amount": amount,
}
elif kind == "Triangle":
return {
"uncertainty type": TriangularUncertainty.id,
"minimum": to_number(field2),
"maximum": to_number(field3),
"loc": amount,
"negative": amount < 0,
"amount": amount,
}
elif kind == "Uniform":
return {
"uncertainty type": UniformUncertainty.id,
"minimum": to_number(field2),
"maximum": to_number(field3),
"loc": amount,
"negative": amount < 0,
"amount": amount,
}
else:
raise ValueError("Unknown uncertainty type: {}".format(kind))
@classmethod
def parse_calculated_parameter(cls, line, pm):
"""Parse line in `Calculated parameters` section.
0. name
1. formula
2. comment
Can include multiline comment in TSV.
"""
return {
"name": line[0],
"formula": normalize_simapro_formulae(line[1], pm),
"comment": "; ".join([x for x in line[2:] if x]),
}
@classmethod
def parse_input_parameter(cls, line):
"""Parse line in `Input parameters` section.
0. name
1. value (not formula)
2. uncertainty type
3. uncert. param.
4. uncert. param.
5. uncert. param.
6. hidden ("Yes" or "No" - we ignore)
7. comment
"""
ds = cls.create_distribution(*line[1:6])
ds.update({"name": line[0], "comment": "; ".join([x for x in line[7:] if x])})
return ds
@classmethod
def parse_biosphere_flow(cls, line, category, pm):
"""Parse biosphere flow line.
0. name
1. subcategory
2. unit
3. value or formula
4. uncertainty type
5. uncert. param.
6. uncert. param.
7. uncert. param.
8. comment
However, sometimes the value is in index 2, and the unit in index 3. Because why not! We assume default ordering unless we find a number in index 2.
"""
unit, amount = line[2], line[3]
if isinstance(to_number(line[2]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {"formula": normalize_simapro_formulae(amount, pm)}
else:
ds = cls.create_distribution(amount, *line[4:8])
ds.update(
{
"name": line[0],
"categories": (category, line[1]),
"unit": unit,
"comment": "; ".join([x for x in line[8:] if x]),
"type": "biosphere",
}
)
return ds
@classmethod
def parse_input_line(cls, line, category, pm):
"""Parse technosphere input line.
0. name
1. unit
2. value or formula
3. uncertainty type
4. uncert. param.
5. uncert. param.
6. uncert. param.
7. comment
However, sometimes the value is in index 1, and the unit in index 2. Because why not! We assume default ordering unless we find a number in index 1.
"""
unit, amount = line[1], line[2]
if isinstance(to_number(line[1]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {"formula": normalize_simapro_formulae(amount, pm)}
else:
ds = cls.create_distribution(amount, *line[3:7])
ds.update(
{
"categories": (category,),
"name": line[0],
"unit": unit,
"comment": "; ".join([x for x in line[7:] if x]),
"type": (
"substitution" if category == "Avoided products" else "technosphere"
),
}
)
return ds
@classmethod
    def parse_final_waste_flow(cls, line, pm):
        """Parse final waste flow line.
0: name
1: subcategory?
2: unit
3. value or formula
4. uncertainty type
5. uncert. param.
6. uncert. param.
7. uncert. param.
However, sometimes the value is in index 2, and the unit in index 3. Because why not! We assume default ordering unless we find a number in index 2.
"""
unit, amount = line[2], line[3]
if isinstance(to_number(line[2]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {"formula": normalize_simapro_formulae(amount, pm)}
else:
ds = cls.create_distribution(amount, *line[4:8])
ds.update(
{
"name": line[0],
"categories": ("Final waste flows", line[1])
if line[1]
else ("Final waste flows",),
"unit": unit,
"comment": "; ".join([x for x in line[8:] if x]),
"type": "technosphere",
}
)
return ds
@classmethod
def parse_reference_product(cls, line, pm):
"""Parse reference product line.
0. name
1. unit
2. value or formula
3. allocation
4. waste type
5. category (separated by \\)
6. comment
However, sometimes the value is in index 1, and the unit in index 2. Because why not! We assume default ordering unless we find a number in index 1.
"""
unit, amount = line[1], line[2]
if isinstance(to_number(line[1]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {"formula": normalize_simapro_formulae(amount, pm)}
else:
ds = {"amount": to_number(amount)}
ds.update(
{
"name": line[0],
"unit": unit,
"allocation": to_number(line[3]),
"categories": tuple(line[5].split("\\")),
"comment": "; ".join([x for x in line[6:] if x]),
"type": "production",
}
)
return ds
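    # ``line[5]`` is a backslash-delimited category path (value invented):
    #
    #   "Materials\Plastics" -> ("Materials", "Plastics")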
@classmethod
def parse_waste_treatment(cls, line, pm):
"""Parse reference product line.
0. name
1. unit
2. value or formula
3. waste type
4. category (separated by \\)
5. comment
"""
is_formula = not isinstance(to_number(line[2]), Number)
if is_formula:
ds = {"formula": normalize_simapro_formulae(line[2], pm)}
else:
ds = {"amount": to_number(line[2])}
ds.update(
{
"name": line[0],
"unit": line[1],
"categories": tuple(line[4].split("\\")),
"comment": "; ".join([x for x in line[5:] if x]),
"type": "production",
}
)
return ds
@classmethod
def read_dataset_metadata(cls, data, index):
metadata = {}
while True:
if not data[index]:
pass
elif data[index] and data[index][0] in SIMAPRO_PRODUCTS:
return metadata, index
elif data[index] and data[index + 1] and data[index][0]:
metadata[data[index][0]] = data[index + 1][0]
index += 1
index += 1
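    # The metadata block alternates key rows and value rows; roughly (rows
    # invented):
    #
    #   ["Process identifier"]   <- key
    #   ["XYZ0123456789"]        <- value on the following row
    #   []                       <- blank rows are skipped
    #   ["Products"]             <- a SIMAPRO_PRODUCTS header ends the block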
@classmethod
def read_data_set(cls, data, index, db_name, filepath, gp, pm, global_precompiled):
metadata, index = cls.read_dataset_metadata(data, index)
# `index` is now the `Products` or `Waste Treatment` line
ds = {
"simapro metadata": metadata,
"code": metadata.get("Process identifier") or uuid.uuid4().hex,
"exchanges": [],
"parameters": [],
"database": db_name,
"filename": filepath,
"type": "process",
}
while not data[index] or data[index][0] != "End":
if not data[index] or not data[index][0]:
index += 1
elif data[index][0] in SIMAPRO_TECHNOSPHERE:
category = data[index][0]
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["exchanges"].append(
cls.parse_input_line(data[index], category, pm)
)
index += 1
elif data[index][0] in SIMAPRO_BIOSPHERE:
category = data[index][0]
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["exchanges"].append(
cls.parse_biosphere_flow(data[index], category, pm)
)
index += 1
elif data[index][0] == "Calculated parameters":
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["parameters"].append(
cls.parse_calculated_parameter(data[index], pm)
)
index += 1
elif data[index][0] == "Input parameters":
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["parameters"].append(cls.parse_input_parameter(data[index]))
index += 1
elif data[index][0] == "Products":
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["exchanges"].append(cls.parse_reference_product(data[index], pm))
index += 1
elif data[index][0] == "Waste treatment":
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["exchanges"].append(cls.parse_waste_treatment(data[index], pm))
index += 1
elif data[index][0] == "Final waste flows":
index += 1 # Advance to data lines
while (
index < len(data) and data[index] and data[index][0]
): # Stop on blank line
ds["exchanges"].append(cls.parse_final_waste_flow(data[index], pm))
index += 1
elif data[index][0] in SIMAPRO_END_OF_DATASETS:
# Don't care about processing steps below, as no dataset
# was extracted
raise EndOfDatasets
else:
index += 1
if index == len(data):
break
        # Extract each parameter name and uppercase it (parameter matching
        # below is case-insensitive)
ds["parameters"] = {obj.pop("name").upper(): obj for obj in ds["parameters"]}
local_precompiled = {
name: re.compile(uppercase_expression.format(name), flags=re.IGNORECASE)
for name in ds["parameters"]
}
# Change all parameter formula values to uppercase if referencing
# global or local parameters
for obj in ds["parameters"].values():
if "formula" in obj:
obj["formula"] = replace_with_uppercase(
obj["formula"], ds["parameters"], local_precompiled
)
obj["formula"] = replace_with_uppercase(
obj["formula"], gp, global_precompiled
)
# Change all exchange values to uppercase if referencing
# global or local parameters
for obj in ds["exchanges"]:
if "formula" in obj:
obj["formula"] = replace_with_uppercase(
obj["formula"], ds["parameters"], local_precompiled
)
obj["formula"] = replace_with_uppercase(
obj["formula"], gp, global_precompiled
)
ps = ParameterSet(
ds["parameters"], {key: value["amount"] for key, value in gp.items()}
)
# Changes in-place
ps(ds["exchanges"])
if not ds["parameters"]:
del ds["parameters"]
return ds, index
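    # Net effect of the uppercasing above (names invented): a parameter defined
    # as "db_weight" is keyed as "DB_WEIGHT", and a formula "db_weight * 2"
    # becomes "DB_WEIGHT * 2" before ``ParameterSet`` evaluates the exchanges
    # in place.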
| 34.286164 | 156 | 0.523342 |
38f4ce85f6a073f6640092229423af2abe2fbdaf | 3,375 | py | Python | scripts/slave/recipes/infra/luci_gae.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | null | null | null | scripts/slave/recipes/infra/luci_gae.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | null | null | null | scripts/slave/recipes/infra/luci_gae.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | ["BSD-3-Clause"] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'depot_tools/git',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'depot_tools/tryserver',
]
def _run_presubmit(api, patch_root, bot_update_step):
upstream = bot_update_step.json.output['properties'].get(
api.gclient.c.got_revision_mapping[
'infra/go/src/github.com/luci/gae'])
# The presubmit must be run with proper Go environment.
# infra/go/env.py takes care of this.
presubmit_cmd = [
'python', # env.py will replace with this its sys.executable.
api.path['depot_tools'].join('presubmit_support.py'),
'--root', api.path['slave_build'].join(patch_root),
'--commit',
'--verbose', '--verbose',
'--issue', api.properties['issue'],
'--patchset', api.properties['patchset'],
'--skip_canned', 'CheckRietveldTryJobExecution',
'--skip_canned', 'CheckTreeIsOpen',
'--skip_canned', 'CheckBuildbotPendingBuilds',
'--rietveld_url', api.properties['rietveld'],
'--rietveld_fetch',
'--upstream', upstream,
'--trybot-json', api.json.output(),
'--rietveld_email', ''
]
api.python('presubmit', api.path['checkout'].join('go', 'env.py'),
presubmit_cmd, env={'PRESUBMIT_BUILDER': '1'})
def _commit_change(api, patch_root):
api.git('-c', 'user.email=commit-bot@chromium.org',
'-c', 'user.name=The Commit Bot',
'commit', '-a', '-m', 'Committed patch',
name='commit git patch',
cwd=api.path['slave_build'].join(patch_root))
def RunSteps(api):
api.gclient.set_config('luci_gae')
# patch_root must match the luci/gae repo, not infra checkout.
for path in api.gclient.c.got_revision_mapping:
if 'github.com/luci/gae' in path:
patch_root = path
break
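  # ``got_revision_mapping`` is keyed by checkout path, so the loop above
  # selects e.g. 'infra/go/src/github.com/luci/gae' (the same key used in
  # _run_presubmit).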
bot_update_step = api.bot_update.ensure_checkout(force=True,
patch_root=patch_root)
is_presubmit = 'presubmit' in api.properties.get('buildername', '').lower()
if is_presubmit:
_commit_change(api, patch_root)
api.gclient.runhooks()
  # This downloads the third-party Go dependencies, so that the next step
  # doesn't have junk output in it.
api.python(
'go third parties',
api.path['checkout'].join('go', 'env.py'),
['go', 'version'])
if is_presubmit:
with api.tryserver.set_failure_hash():
_run_presubmit(api, patch_root, bot_update_step)
else:
api.python(
'go build',
api.path['checkout'].join('go', 'env.py'),
['go', 'build', 'github.com/luci/gae/...'])
api.python(
'go test',
api.path['checkout'].join('go', 'env.py'),
['go', 'test', 'github.com/luci/gae/...'])
def GenTests(api):
yield (
api.test('luci_gae') +
api.properties.git_scheduled(
buildername='luci-gae-linux64',
mastername='chromium.infra',
repository='https://chromium.googlesource.com/external/github.com/luci/gae',
)
)
yield (
api.test('presubmit_try_job') +
api.properties.tryserver(
mastername='tryserver.infra',
buildername='Luci-GAE Presubmit',
) + api.step_data('presubmit', api.json.output([[]]))
)
| 32.142857 | 84 | 0.64 |
0eb0ceda657bc4d4e96ed44af26a131785dcc574 | 638 | py | Python | oops_fhir/r4/value_set/restful_capability_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null | oops_fhir/r4/value_set/restful_capability_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null | oops_fhir/r4/value_set/restful_capability_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.restful_capability_mode import (
RestfulCapabilityMode as RestfulCapabilityMode_,
)
__all__ = ["RestfulCapabilityMode"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class RestfulCapabilityMode(RestfulCapabilityMode_):
"""
RestfulCapabilityMode
The mode of a RESTful capability statement.
Status: active - Version: 4.0.1
http://hl7.org/fhir/ValueSet/restful-capability-mode
"""
class Meta:
resource = _resource
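# A minimal usage sketch (assumes the parsed ``fhir.resources`` ValueSet
# exposes its canonical ``url`` attribute):
#
#   >>> RestfulCapabilityMode.Meta.resource.url
#   'http://hl7.org/fhir/ValueSet/restful-capability-mode'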
| 20.580645 | 69 | 0.755486 |
5fa98d222f0b15357816897d8dbba04ae1f98210 | 1,925 | py | Python | vectorhub/encoders/image/tfhub/bit.py | vector-ai/vectorhub | 17c2f342cef2ff7bcc02c8f3914e79ad92071a9e | ["Apache-2.0"] | 385 | 2020-10-26T13:12:11.000Z | 2021-10-07T15:14:48.000Z | vectorhub/encoders/image/tfhub/bit.py | vector-ai/vectorhub | 17c2f342cef2ff7bcc02c8f3914e79ad92071a9e | ["Apache-2.0"] | 24 | 2020-10-29T13:16:31.000Z | 2021-08-31T06:47:33.000Z | vectorhub/encoders/image/tfhub/bit.py | vector-ai/vectorhub | 17c2f342cef2ff7bcc02c8f3914e79ad92071a9e | ["Apache-2.0"] | 45 | 2020-10-29T15:25:19.000Z | 2021-09-05T21:50:57.000Z |
from datetime import date
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from ....import_utils import *
from ..base import BaseImage2Vec
if is_all_dependency_installed('encoders-image-tfhub'):
import tensorflow as tf
import tensorflow_hub as hub
BITModelDefinition = ModelDefinition(markdown_filepath='encoders/image/tfhub/bit')
__doc__ = BITModelDefinition.create_docs()
class BitSmall2Vec(BaseImage2Vec):
definition = BITModelDefinition
urls = {
'https://tfhub.dev/google/bit/s-r50x1/1': {"vector_length":2048}, # 2048 output shape
'https://tfhub.dev/google/bit/s-r50x3/1': {"vector_length":6144}, # 6144 output shape
'https://tfhub.dev/google/bit/s-r101x1/1': {"vector_length":2048}, # 2048 output shape
'https://tfhub.dev/google/bit/s-r101x3/1': {"vector_length":6144}, # 6144 output shape
'https://tfhub.dev/google/bit/s-r152x4/1': {"vector_length":8192}, # 8192 output shape
}
def __init__(self, model_url: str = "https://tfhub.dev/google/bit/s-r50x1/1"):
self.validate_model_url(model_url, list(self.urls.keys()))
self.init(model_url)
self.vector_length = self.urls[model_url]["vector_length"]
def init(self, model_url: str):
self.model_url = model_url
self.model_name = self.model_url.replace(
'https://tfhub.dev/google/', '').replace('/', '_')
self.model = hub.load(self.model_url)
@catch_vector_errors
def encode(self, image):
if isinstance(image, str):
image = self.read(image)
return self.model([image]).numpy().tolist()[0]
@catch_vector_errors
def bulk_encode(self, images):
"""
        Bulk encode images. Chunking/batch size should be handled by the
        caller, outside of this method.
        """
        # TODO: replace the per-image list comprehension with proper batched
        # reading and encoding
return [self.encode(x) for x in images]
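# A minimal usage sketch (downloads the default TF Hub model on first use;
# "cat.jpg" is a hypothetical image path):
#
#   >>> model = BitSmall2Vec()              # default: bit/s-r50x1
#   >>> vector = model.encode("cat.jpg")    # str paths are read first
#   >>> len(vector) == model.vector_length == 2048
#   True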
| 39.285714 | 96 | 0.668571 |
c0fe2b35ec024dc5f622e32820ec60f6e29e2e2c | 9,343 | py | Python | src/sage/plot/density_plot.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | ["BSL-1.0"] | null | null | null | src/sage/plot/density_plot.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | ["BSL-1.0"] | null | null | null | src/sage/plot/density_plot.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | ["BSL-1.0"] | 1 | 2020-07-24T12:20:37.000Z | 2020-07-24T12:20:37.000Z |
"""
Density Plots
"""
#*****************************************************************************
# Copyright (C) 2006 Alex Clemesha <clemesha@gmail.com>,
# William Stein <wstein@gmail.com>,
# 2008 Mike Hansen <mhansen@gmail.com>,
# Arnaud Bergeron <abergeron@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.plot.primitive import GraphicPrimitive
from sage.misc.decorators import options
from sage.plot.colors import get_cmap
from sage.misc.misc import xsrange
class DensityPlot(GraphicPrimitive):
"""
Primitive class for the density plot graphics type. See
``density_plot?`` for help actually doing density plots.
INPUT:
- ``xy_data_array`` - list of lists giving evaluated values of the
function on the grid
- ``xrange`` - tuple of 2 floats indicating range for horizontal direction
- ``yrange`` - tuple of 2 floats indicating range for vertical direction
- ``options`` - dict of valid plot options to pass to constructor
EXAMPLES:
Note this should normally be used indirectly via ``density_plot``::
sage: from sage.plot.density_plot import DensityPlot
sage: D = DensityPlot([[1,3],[2,4]],(1,2),(2,3),options={})
sage: D
DensityPlot defined by a 2 x 2 data grid
sage: D.yrange
(2, 3)
sage: D.options()
{}
TESTS:
We test creating a density plot::
sage: x,y = var('x,y')
sage: density_plot(x^2-y^3+10*sin(x*y), (x, -4, 4), (y, -4, 4),plot_points=121,cmap='hsv')
Graphics object consisting of 1 graphics primitive
"""
def __init__(self, xy_data_array, xrange, yrange, options):
"""
Initializes base class DensityPlot.
EXAMPLES::
sage: x,y = var('x,y')
sage: D = density_plot(x^2-y^3+10*sin(x*y), (x, -4, 4), (y, -4, 4),plot_points=121,cmap='hsv')
sage: D[0].xrange
(-4.0, 4.0)
sage: D[0].options()['plot_points']
121
"""
self.xrange = xrange
self.yrange = yrange
self.xy_data_array = xy_data_array
self.xy_array_row = len(xy_data_array)
self.xy_array_col = len(xy_data_array[0])
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
"""
Returns a dictionary with the bounding box data.
EXAMPLES::
sage: x,y = var('x,y')
sage: f(x, y) = x^2 + y^2
sage: d = density_plot(f, (3, 6), (3, 6))[0].get_minmax_data()
sage: d['xmin']
3.0
sage: d['ymin']
3.0
"""
from sage.plot.plot import minmax_data
return minmax_data(self.xrange, self.yrange, dict=True)
def _allowed_options(self):
"""
Return the allowed options for the DensityPlot class.
TESTS::
sage: isinstance(density_plot(x, (-2,3), (1,10))[0]._allowed_options(), dict)
True
"""
return {'plot_points':'How many points to use for plotting precision',
'cmap':"""the name of a predefined colormap,
a list of colors or an instance of a
matplotlib Colormap. Type: import matplotlib.cm; matplotlib.cm.datad.keys()
for available colormap names.""",
'interpolation':'What interpolation method to use'}
def _repr_(self):
"""
        String representation of DensityPlot primitive.
EXAMPLES::
sage: x,y = var('x,y')
sage: D = density_plot(x^2-y^2,(x,-2,2),(y,-2,2))
sage: d = D[0]; d
DensityPlot defined by a 25 x 25 data grid
"""
return "DensityPlot defined by a %s x %s data grid"%(self.xy_array_row, self.xy_array_col)
def _render_on_subplot(self, subplot):
"""
TESTS:
A somewhat random plot, but fun to look at::
sage: x,y = var('x,y')
sage: density_plot(x^2-y^3+10*sin(x*y), (x, -4, 4), (y, -4, 4),plot_points=121,cmap='hsv')
Graphics object consisting of 1 graphics primitive
"""
options = self.options()
cmap = get_cmap(options['cmap'])
x0,x1 = float(self.xrange[0]), float(self.xrange[1])
y0,y1 = float(self.yrange[0]), float(self.yrange[1])
subplot.imshow(self.xy_data_array, origin='lower', cmap=cmap, extent=(x0,x1,y0,y1), interpolation=options['interpolation'])
@options(plot_points=25, cmap='gray', interpolation='catrom')
def density_plot(f, xrange, yrange, **options):
r"""
``density_plot`` takes a function of two variables, `f(x,y)`
    and plots the height of the function over the specified
``xrange`` and ``yrange`` as demonstrated below.
``density_plot(f, (xmin, xmax), (ymin, ymax), ...)``
INPUT:
- ``f`` -- a function of two variables
- ``(xmin, xmax)`` -- 2-tuple, the range of ``x`` values OR 3-tuple
``(x,xmin,xmax)``
- ``(ymin, ymax)`` -- 2-tuple, the range of ``y`` values OR 3-tuple
``(y,ymin,ymax)``
The following inputs must all be passed in as named parameters:
- ``plot_points`` -- integer (default: 25); number of points to plot
in each direction of the grid
- ``cmap`` -- a colormap (type ``cmap_help()`` for more information).
- ``interpolation`` -- string (default: ``'catrom'``), the interpolation
method to use: ``'bilinear'``, ``'bicubic'``, ``'spline16'``,
``'spline36'``, ``'quadric'``, ``'gaussian'``, ``'sinc'``,
``'bessel'``, ``'mitchell'``, ``'lanczos'``, ``'catrom'``,
``'hermite'``, ``'hanning'``, ``'hamming'``, ``'kaiser'``
EXAMPLES:
Here we plot a simple function of two variables. Note that
since the input function is an expression, we need to explicitly
declare the variables in 3-tuples for the range::
sage: x,y = var('x,y')
sage: density_plot(sin(x)*sin(y), (x, -2, 2), (y, -2, 2))
Graphics object consisting of 1 graphics primitive
Here we change the ranges and add some options; note that here
``f`` is callable (has variables declared), so we can use 2-tuple ranges::
sage: x,y = var('x,y')
sage: f(x,y) = x^2*cos(x*y)
sage: density_plot(f, (x,-10,5), (y, -5,5), interpolation='sinc', plot_points=100)
Graphics object consisting of 1 graphics primitive
An even more complicated plot::
sage: x,y = var('x,y')
sage: density_plot(sin(x^2 + y^2)*cos(x)*sin(y), (x, -4, 4), (y, -4, 4), cmap='jet', plot_points=100)
Graphics object consisting of 1 graphics primitive
This should show a "spotlight" right on the origin::
sage: x,y = var('x,y')
sage: density_plot(1/(x^10+y^10), (x, -10, 10), (y, -10, 10))
Graphics object consisting of 1 graphics primitive
Some elliptic curves, but with symbolic endpoints. In the first
example, the plot is rotated 90 degrees because we switch the
variables `x`, `y`::
sage: density_plot(y^2 + 1 - x^3 - x, (y,-pi,pi), (x,-pi,pi))
Graphics object consisting of 1 graphics primitive
::
sage: density_plot(y^2 + 1 - x^3 - x, (x,-pi,pi), (y,-pi,pi))
Graphics object consisting of 1 graphics primitive
Extra options will get passed on to show(), as long as they are valid::
sage: density_plot(log(x) + log(y), (x, 1, 10), (y, 1, 10), dpi=20)
Graphics object consisting of 1 graphics primitive
::
sage: density_plot(log(x) + log(y), (x, 1, 10), (y, 1, 10)).show(dpi=20) # These are equivalent
TESTS:
Check that :trac:`15315` is fixed, i.e., density_plot respects the
``aspect_ratio`` parameter. Without the fix, it looks like a thin line
of width a few mm. With the fix it should look like a nice fat layered
image::
sage: density_plot((x*y)^(1/2), (x,0,3), (y,0,500), aspect_ratio=.01)
Graphics object consisting of 1 graphics primitive
Default ``aspect_ratio`` is ``"automatic"``, and that should work too::
sage: density_plot((x*y)^(1/2), (x,0,3), (y,0,500))
Graphics object consisting of 1 graphics primitive
"""
from sage.plot.all import Graphics
from sage.plot.misc import setup_for_eval_on_grid
g, ranges = setup_for_eval_on_grid([f], [xrange, yrange], options['plot_points'])
g = g[0]
xrange,yrange=[r[:2] for r in ranges]
xy_data_array = [[g(x, y) for x in xsrange(*ranges[0], include_endpoint=True)]
for y in xsrange(*ranges[1], include_endpoint=True)]
g = Graphics()
g._set_extra_kwds(Graphics._extract_kwds_for_show(options, ignore=['xmin', 'xmax']))
g.add_primitive(DensityPlot(xy_data_array, xrange, yrange, options))
return g
| 35.660305 | 131 | 0.586321 |
18b1e0e91d766fe67a717be42b69fd4ae0baa0e5 | 52,007 | py | Python | doc/conf.py | jhouck/mne-python | 95facbd1a28e471cf81e1d86735fa272a66d13d1 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | jhouck/mne-python | 95facbd1a28e471cf81e1d86735fa272a66d13d1 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | jhouck/mne-python | 95facbd1a28e471cf81e1d86735fa272a66d13d1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import gc
import os
import sys
import time
import warnings
from datetime import datetime, timezone
from distutils.version import LooseVersion
import numpy as np
import matplotlib
import sphinx
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import mne
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt)
from mne.viz import Brain # noqa
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
f'{sphinx_gallery.__version__}')
matplotlib.use('agg')
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- Project information -----------------------------------------------------
project = 'MNE'
td = datetime.now(tz=timezone.utc)
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
copyright = (
f'2012–{td.year}, MNE Developers. Last updated <time datetime="{td.isoformat()}" class="localized">{td.strftime("%Y-%m-%d %H:%M %Z")}</time>\n' # noqa: E501
'<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>') # noqa: E501
if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true':
copyright = f'2012–{td.year}, MNE Developers. Last updated locally.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = mne.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'numpydoc',
'sphinx_gallery.gen_gallery',
'gen_commands',
'gh_substitutions',
'mne_substitutions',
'gen_names',
'sphinx_bootstrap_divs',
'sphinxcontrib.bibtex',
'sphinx_copybutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']
# The suffix of source filenames.
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Sphinx-Copybutton configuration -----------------------------------------
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'numba': ('https://numba.pydata.org/numba-doc/latest', None),
'joblib': ('https://joblib.readthedocs.io/en/latest', None),
'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
'nibabel': ('https://nipy.org/nibabel', None),
'nilearn': ('http://nilearn.github.io', None),
'surfer': ('https://pysurfer.github.io/', None),
'mne_bids': ('https://mne.tools/mne-bids/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'seaborn': ('https://seaborn.pydata.org/', None),
'statsmodels': ('https://www.statsmodels.org/dev', None),
'patsy': ('https://patsy.readthedocs.io/en/latest', None),
'pyvista': ('https://docs.pyvista.org', None),
'imageio': ('https://imageio.readthedocs.io/en/latest', None),
'mne_realtime': ('https://mne.tools/mne-realtime', None),
'picard': ('https://pierreablin.github.io/picard/', None),
'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None),
'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None),
'dipy': ('https://dipy.org/documentation/1.4.0./',
'https://dipy.org/documentation/1.4.0./objects.inv/'),
}
# NumPyDoc configuration -----------------------------------------------------
# Define what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
numpydoc_xref_param_type = True
numpydoc_xref_aliases = {
# Python
'file-like': ':term:`file-like <python:file object>`',
# Matplotlib
'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
'color': ':doc:`color <matplotlib:api/colors_api>`',
'collection': ':doc:`collections <matplotlib:api/collections_api>`',
'Axes': 'matplotlib.axes.Axes',
'Figure': 'matplotlib.figure.Figure',
'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
# Mayavi
'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
'mlab.Figure': 'mayavi.core.api.Scene',
# sklearn
'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
# joblib
'joblib.Parallel': 'joblib.Parallel',
# nibabel
'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
'SpatialImage': 'nibabel.spatialimages.SpatialImage',
# MNE
'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
'SourceMorph': 'mne.SourceMorph',
'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
'AverageTFR': 'mne.time_frequency.AverageTFR',
'EpochsTFR': 'mne.time_frequency.EpochsTFR',
'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
'DigMontage': 'mne.channels.DigMontage',
'VectorSourceEstimate': 'mne.VectorSourceEstimate',
'VolSourceEstimate': 'mne.VolSourceEstimate',
'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
'MixedSourceEstimate': 'mne.MixedSourceEstimate',
'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
'ConductorModel': 'mne.bem.ConductorModel',
'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
'InverseOperator': 'mne.minimum_norm.InverseOperator',
'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
'SourceMorph': 'mne.SourceMorph',
'Xdawn': 'mne.preprocessing.Xdawn',
'Report': 'mne.Report', 'Forward': 'mne.Forward',
'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
'Vectorizer': 'mne.decoding.Vectorizer',
'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
'TemporalFilter': 'mne.decoding.TemporalFilter',
'SSD': 'mne.decoding.SSD',
'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
'PSDEstimator': 'mne.decoding.PSDEstimator',
'LinearModel': 'mne.decoding.LinearModel',
'FilterEstimator': 'mne.decoding.FilterEstimator',
'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
'Beamformer': 'mne.beamformer.Beamformer',
'Transform': 'mne.transforms.Transform',
# dipy
'dipy.align.AffineMap': 'dipy.align.imaffine.AffineMap',
'dipy.align.DiffeomorphicMap': 'dipy.align.imwarp.DiffeomorphicMap',
}
numpydoc_xref_ignore = {
# words
'instance', 'instances', 'of', 'default', 'shape', 'or',
'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
'dtype', 'object', 'self.verbose',
# shapes
'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg',
'n_moments', 'n_patterns',
# Undocumented (on purpose)
'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi',
# sklearn subclasses
'mapping', 'to', 'any',
# unlinkable
'mayavi.mlab.pipeline.surface',
'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
}
numpydoc_validate = True
numpydoc_validation_checks = {'all'} | set(error_ignores)
numpydoc_validation_exclude = { # set of regex
# dict subclasses
r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys',
r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values',
# list subclasses
r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove',
r'\.sort',
# we currently don't document these properly (probably okay)
r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__',
r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__',
# copied from sklearn
r'mne\.utils\.deprecated',
}
# -- Sphinx-gallery configuration --------------------------------------------
class Resetter(object):
"""Simple class to make the str(obj) static for Sphinx build env hash."""
def __init__(self):
self.t0 = time.time()
def __repr__(self):
return f'<{self.__class__.__name__}>'
def __call__(self, gallery_conf, fname):
import matplotlib.pyplot as plt
try:
from pyvista import Plotter # noqa
except ImportError:
Plotter = None # noqa
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
BackgroundPlotter = None # noqa
try:
from vtk import vtkPolyData # noqa
except ImportError:
vtkPolyData = None # noqa
from mne.viz.backends.renderer import backend
_Renderer = backend._Renderer if backend is not None else None
reset_warnings(gallery_conf, fname)
# in case users have interactive mode turned on in matplotlibrc,
# turn it off here (otherwise the build can be very slow)
plt.ioff()
plt.rcParams['animation.embed_limit'] = 30.
gc.collect()
_assert_no_instances(Brain, 'Brain') # calls gc.collect()
if Plotter is not None:
_assert_no_instances(Plotter, 'Plotter')
if BackgroundPlotter is not None:
_assert_no_instances(BackgroundPlotter, 'BackgroundPlotter')
if vtkPolyData is not None:
_assert_no_instances(vtkPolyData, 'vtkPolyData')
_assert_no_instances(_Renderer, '_Renderer')
# This will overwrite some Sphinx printing but it's useful
# for memory timestamps
if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
import psutil
process = psutil.Process(os.getpid())
mem = sizeof_fmt(process.memory_info().rss)
print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22))
examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
mne.viz.set_3d_backend(mne.viz.get_3d_backend())
except Exception:
report_scraper = None
else:
backend = mne.viz.get_3d_backend()
if backend == 'mayavi':
from traits.api import push_exception_handler
mlab = mne.utils._import_mlab()
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
# hack to initialize the Mayavi Engine
mlab.test_plot3d()
mlab.close()
scrapers += ('mayavi',)
push_exception_handler(reraise_exceptions=True)
elif backend in ('notebook', 'pyvistaqt'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
pyvista.OFF_SCREEN = False
brain_scraper = mne.viz._brain._BrainScraper()
scrapers += (brain_scraper, 'pyvista')
report_scraper = mne.report._ReportScraper()
scrapers += (report_scraper,)
del backend
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': dict(mne=None),
'examples_dirs': examples_dirs,
'subsection_order': ExplicitOrder(['../examples/io/',
'../examples/simulation/',
'../examples/preprocessing/',
'../examples/visualization/',
'../examples/time_frequency/',
'../examples/stats/',
'../examples/decoding/',
'../examples/connectivity/',
'../examples/forward/',
'../examples/inverse/',
'../examples/realtime/',
'../examples/datasets/',
'../tutorials/intro/',
'../tutorials/io/',
'../tutorials/raw/',
'../tutorials/preprocessing/',
'../tutorials/epochs/',
'../tutorials/evoked/',
'../tutorials/time-freq/',
'../tutorials/forward/',
'../tutorials/inverse/',
'../tutorials/stats-sensor-space/',
'../tutorials/stats-source-space/',
'../tutorials/machine-learning/',
'../tutorials/clinical/',
'../tutorials/simulation/',
'../tutorials/sample-datasets/',
'../tutorials/misc/']),
'gallery_dirs': gallery_dirs,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning
'thumbnail_size': (160, 112),
'remove_config_comments': True,
'min_reported_time': 1.,
'abort_on_example_error': False,
'reset_modules': ('matplotlib', Resetter()), # called w/each script
'image_scrapers': scrapers,
'show_memory': not sys.platform.startswith('win'),
'line_numbers': False, # messes with style
'within_subsection_order': FileNameSortKey,
'capture_repr': ('_repr_html_',),
'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
'matplotlib_animations': True,
'compress_images': ('images', 'thumbnails'),
'filename_pattern': '^((?!sgskip).)*$',
}
# Files were renamed from plot_* with:
# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa
def append_attr_meth_examples(app, what, name, obj, options, lines):
"""Append SG examples backreferences to method and attr docstrings."""
# NumpyDoc nicely embeds method and attribute docstrings for us, but it
# does not respect the autodoc templates that would otherwise insert
# the .. include:: lines, so we need to do it.
# Eventually this could perhaps live in SG.
if what in ('attribute', 'method'):
size = os.path.getsize(os.path.join(
os.path.dirname(__file__), 'generated', '%s.examples' % (name,)))
if size > 0:
lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
# -- Other extension configuration -------------------------------------------
linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36') # noqa: E501
linkcheck_ignore = [ # will be compiled to regex
r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full', # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
'https://doi.org/10.1002/mds.870120629', # Read timed out.
'https://doi.org/10.1088/0031-9155/32/1/004', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/40/3/001', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/51/7/008', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0967-3334/22/4/305', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/1741-2552/aacfe4', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1093/sleep/18.7.557', # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557
'https://doi.org/10.1162/089976699300016719', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242
'https://doi.org/10.1162/jocn.1993.5.2.162', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095
'https://doi.org/10.1162/neco.1995.7.6.1129', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909
'https://doi.org/10.1162/jocn_a_00405', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980
'https://doi.org/10.1167/15.6.4', # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4
'https://doi.org/10.7488/ds/1556', # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach', # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://www.nyu.edu/', # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)')))
'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442.*', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False # saves a bit of time
linkcheck_timeout = 15 # some can be quite slow
# autodoc / autosummary
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''
# -- Nitpicky ----------------------------------------------------------------
nitpicky = True
nitpick_ignore = [
("py:class", "None. Remove all items from D."),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "an object providing a view on D's values"),
("py:class", "a shallow copy of D"),
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "_FuncT"), # type hint used in @verbose decorator
("py:class", "mne.utils._logging._FuncT"),
]
for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label',
'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report',
'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate',
'VolSourceEstimate', 'VolVectorSourceEstimate',
'channels.DigMontage', 'channels.Layout',
'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator',
'decoding.GeneralizingEstimator', 'decoding.LinearModel',
'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD',
'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator',
'decoding.TemporalFilter', 'decoding.TimeDelayingRidge',
'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter',
'decoding.Vectorizer',
'preprocessing.ICA', 'preprocessing.Xdawn',
'simulation.SourceSimulator',
'time_frequency.CrossSpectralDensity',
'utils.deprecated',
'viz.ClickableImage'):
nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__'))
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'icon_links': [
dict(name='GitHub',
url='https://github.com/mne-tools/mne-python',
icon='fab fa-github-square'),
dict(name='Twitter',
url='https://twitter.com/mne_python',
icon='fab fa-twitter-square'),
dict(name='Discourse',
url='https://mne.discourse.group/',
icon='fab fa-discourse'),
dict(name='Discord',
url='https://discord.gg/rKfvxTuATa',
icon='fab fa-discord')
],
'icon_links_label': 'Quick Links', # for screen reader
'use_edit_page_button': False,
'navigation_with_keys': False,
'show_toc_level': 1,
'navbar_end': ['version-switcher', 'navbar-icon-links'],
'footer_items': ['copyright'],
'google_analytics_id': 'UA-37225609-1',
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
'contributing.html',
'documentation.html',
'getting_started.html',
'install_mne_python.html',
]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['search-field.html', 'sidebar-quicklinks.html'],
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# accommodate different logo shapes (width values in rem)
xs = '2'
sm = '2.5'
md = '3'
lg = '4.5'
xl = '5'
xxl = '6'
# variables to pass to HTML templating engine
html_context = {
'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))),
'versions_dropdown': {
'dev': 'v0.24 (devel)',
'stable': 'v0.23 (stable)',
'0.22': 'v0.22',
'0.21': 'v0.21',
'0.20': 'v0.20',
'0.19': 'v0.19',
'0.18': 'v0.18',
'0.17': 'v0.17',
'0.16': 'v0.16',
'0.15': 'v0.15',
'0.14': 'v0.14',
'0.13': 'v0.13',
'0.12': 'v0.12',
'0.11': 'v0.11',
},
'funders': [
dict(img='nih.png', size='3', title='National Institutes of Health'),
dict(img='nsf.png', size='3.5',
title='US National Science Foundation'),
dict(img='erc.svg', size='3.5', title='European Research Council'),
dict(img='doe.svg', size='3', title='US Department of Energy'),
dict(img='anr.svg', size='4.5',
title='Agence Nationale de la Recherche'),
dict(img='cds.png', size='2.25',
title='Paris-Saclay Center for Data Science'),
dict(img='google.svg', size='2.25', title='Google'),
dict(img='amazon.svg', size='2.5', title='Amazon'),
dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'),
],
'institutions': [
dict(name='Massachusetts General Hospital',
img='MGH.svg',
url='https://www.massgeneral.org/',
size=sm),
dict(name='Athinoula A. Martinos Center for Biomedical Imaging',
img='Martinos.png',
url='https://martinos.org/',
size=md),
dict(name='Harvard Medical School',
img='Harvard.png',
url='https://hms.harvard.edu/',
size=sm),
dict(name='Massachusetts Institute of Technology',
img='MIT.svg',
url='https://web.mit.edu/',
size=md),
dict(name='New York University',
img='NYU.png',
url='https://www.nyu.edu/',
size=xs),
        dict(name='Commissariat à l’énergie atomique et aux énergies alternatives',  # noqa E501
img='CEA.png',
url='http://www.cea.fr/',
size=md),
dict(name='Aalto-yliopiston perustieteiden korkeakoulu',
img='Aalto.svg',
url='https://sci.aalto.fi/',
size=md),
dict(name='Télécom ParisTech',
img='Telecom_Paris_Tech.svg',
url='https://www.telecom-paris.fr/',
size=md),
dict(name='University of Washington',
img='Washington.png',
url='https://www.washington.edu/',
size=md),
dict(name='Institut du Cerveau et de la Moelle épinière',
img='ICM.jpg',
url='https://icm-institute.org/',
size=md),
dict(name='Boston University',
img='BU.svg',
url='https://www.bu.edu/',
size=lg),
dict(name='Institut national de la santé et de la recherche médicale',
img='Inserm.svg',
url='https://www.inserm.fr/',
size=xl),
dict(name='Forschungszentrum Jülich',
img='Julich.svg',
url='https://www.fz-juelich.de/',
size=xl),
dict(name='Technische Universität Ilmenau',
img='Ilmenau.gif',
url='https://www.tu-ilmenau.de/',
size=xxl),
dict(name='Berkeley Institute for Data Science',
img='BIDS.png',
url='https://bids.berkeley.edu/',
size=lg),
dict(name='Institut national de recherche en informatique et en automatique', # noqa E501
img='inria.png',
url='https://www.inria.fr/',
size=xl),
dict(name='Aarhus Universitet',
img='Aarhus.png',
url='https://www.au.dk/',
size=xl),
dict(name='Karl-Franzens-Universität Graz',
img='Graz.jpg',
url='https://www.uni-graz.at/',
size=md),
dict(name='SWPS Uniwersytet Humanistycznospołeczny',
img='SWPS.svg',
url='https://www.swps.pl/',
size=xl),
dict(name='Max-Planck-Institut für Bildungsforschung',
img='MPIB.svg',
url='https://www.mpib-berlin.mpg.de/',
size=xxl),
dict(name='Macquarie University',
img='Macquarie.png',
url='https://www.mq.edu.au/',
size=lg),
dict(name='Children’s Hospital of Philadelphia Research Institute',
img='CHOP.svg',
url='https://imaging.research.chop.edu/',
size=xxl),
],
# \u00AD is an optional hyphen (not rendered unless needed)
'carousel': [
dict(title='Source Estimation',
text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.', # noqa E501
url='auto_tutorials/inverse/30_mne_dspm_loreta.html',
img='sphx_glr_30_mne_dspm_loreta_008.gif',
alt='dSPM'),
dict(title='Machine Learning',
text='Advanced decoding models including time general\u00ADiza\u00ADtion.', # noqa E501
url='auto_tutorials/machine-learning/50_decoding.html',
img='sphx_glr_50_decoding_006.png',
alt='Decoding'),
dict(title='Encoding Models',
text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.', # noqa E501
url='auto_tutorials/machine-learning/30_strf.html',
img='sphx_glr_30_strf_001.png',
alt='STRF'),
dict(title='Statistics',
text='Parametric and non-parametric, permutation tests and clustering.', # noqa E501
url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html', # noqa E501
img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png',
alt='Clusters'),
dict(title='Connectivity',
text='All-to-all spectral and effective connec\u00ADtivity measures.', # noqa E501
url='auto_examples/connectivity/mne_inverse_label_connectivity.html', # noqa E501
img='sphx_glr_mne_inverse_label_connectivity_001.png',
alt='Connectivity'),
dict(title='Data Visualization',
text='Explore your data from multiple perspectives.',
url='auto_tutorials/evoked/20_visualize_evoked.html',
img='sphx_glr_20_visualize_evoked_007.png',
alt='Visualization'),
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = []
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
_np_print_defaults = np.get_printoptions()
# -- Warnings management -----------------------------------------------------
def reset_warnings(gallery_conf, fname):
"""Ensure we are future compatible and ignore silly warnings."""
# In principle, our examples should produce no warnings.
# Here we cause warnings to become errors, with a few exceptions.
# This list should be considered alongside
# setup.cfg -> [tool:pytest] -> filterwarnings
# remove tweaks from other module imports or example runs
warnings.resetwarnings()
# restrict
warnings.filterwarnings('error')
# allow these, but show them
warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
warnings.filterwarnings('always', '.*cannot make axes width small.*')
warnings.filterwarnings('always', '.*Axes that are not compatible.*')
warnings.filterwarnings('always', '.*FastICA did not converge.*')
# ECoG BIDS spec violations:
warnings.filterwarnings('always', '.*Fiducial point nasion not found.*')
warnings.filterwarnings('always', '.*DigMontage is only a subset of.*')
warnings.filterwarnings( # xhemi morph (should probably update sample)
'always', '.*does not exist, creating it and saving it.*')
warnings.filterwarnings('default', module='sphinx') # internal warnings
warnings.filterwarnings(
'always', '.*converting a masked element to nan.*') # matplotlib?
# allow these warnings, but don't show them
warnings.filterwarnings(
'ignore', '.*OpenSSL\\.rand is deprecated.*')
warnings.filterwarnings('ignore', '.*is currently using agg.*')
warnings.filterwarnings( # SciPy-related warning (maybe 1.2.0 will fix it)
'ignore', '.*the matrix subclass is not the recommended.*')
warnings.filterwarnings( # some joblib warning
'ignore', '.*semaphore_tracker: process died unexpectedly.*')
warnings.filterwarnings( # needed until SciPy 1.2.0 is released
'ignore', '.*will be interpreted as an array index.*', module='scipy')
for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
'Using or importing the ABCs from', # internal modules on 3.7
r"it will be an error for 'np\.bool_'", # ndimage
"DocumenterBridge requires a state object", # sphinx dev
"'U' mode is deprecated", # sphinx io
r"joblib is deprecated in 0\.21", # nilearn
'The usage of `cmp` is deprecated and will', # sklearn/pytest
'scipy.* is deprecated and will be removed in', # dipy
r'Converting `np\.character` to a dtype is deprecated', # vtk
r'sphinx\.util\.smartypants is deprecated',
'is a deprecated alias for the builtin', # NumPy
'the old name will be removed', # Jinja, via sphinx
'rcParams is deprecated', # PyVista rcParams -> global_theme
'to mean no clipping',
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*%s.*" % key, category=DeprecationWarning)
warnings.filterwarnings( # deal with bootstrap-theme bug
'ignore', message=".*modify script_files in the theme.*",
category=Warning)
warnings.filterwarnings( # nilearn
'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
category=FutureWarning)
warnings.filterwarnings( # nilearn
'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
warnings.filterwarnings( # nilearn
'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning)
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
warnings.filterwarnings( # realtime
'ignore', message=".*unclosed file.*", category=ResourceWarning)
warnings.filterwarnings('ignore', message='Exception ignored in.*')
# allow this ImportWarning, but don't show it
warnings.filterwarnings(
'ignore', message="can't resolve package from", category=ImportWarning)
warnings.filterwarnings(
'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
# In case we use np.set_printoptions in any tutorials, we only
# want it to affect those:
np.set_printoptions(**_np_print_defaults)
reset_warnings(None, None)
# -- Fontawesome support -----------------------------------------------------
# here the "b" and "s" refer to "brand" and "solid" (determines which font file
# to look in). "fw-" prefix indicates fixed width.
icons = {
'apple': 'b',
'linux': 'b',
'windows': 'b',
'hand-paper': 's',
'question': 's',
'quote-left': 's',
'rocket': 's',
'server': 's',
'fw-book': 's',
'fw-code-branch': 's',
'fw-newspaper': 's',
'fw-question-circle': 's',
'fw-quote-left': 's',
}
prolog = ''
for icon, cls in icons.items():
fw = ' fa-fw' if icon.startswith('fw-') else ''
prolog += f'''
.. |{icon}| raw:: html
<i class="fa{cls} fa-{icon[3:] if fw else icon}{fw}"></i>
'''
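# For reference, the loop above renders e.g. the following reST (one "brand"
# icon and one fixed-width "solid" icon):
#
#   .. |apple| raw:: html
#
#       <i class="fab fa-apple"></i>
#
#   .. |fw-book| raw:: html
#
#       <i class="fas fa-book fa-fw"></i>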
# -- Dependency info ----------------------------------------------------------
try:
from importlib.metadata import metadata # new in Python 3.8
min_py = metadata('mne')['Requires-Python']
except ModuleNotFoundError:
from pkg_resources import get_distribution
info = get_distribution('mne').get_metadata_lines('PKG-INFO')
for line in info:
if line.strip().startswith('Requires-Python'):
min_py = line.split(':')[1]
min_py = min_py.lstrip(' =<>')
prolog += f'\n.. |min_python_version| replace:: {min_py}\n'
# -- website redirects --------------------------------------------------------
# Static list created 2021/04/13 based on what we needed to redirect,
# since we don't need to add redirects for examples added after this date.
needed_plot_redirects = {
# tutorials
'10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py',
'10_preprocessing_overview.py', '10_raw_overview.py',
'10_reading_meg_data.py', '15_handling_bad_channels.py',
'20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py',
'20_rejecting_bad_data.py', '20_visualize_epochs.py',
'20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py',
'30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py',
'35_artifact_correction_regression.py', '40_artifact_correction_ica.py',
'40_autogenerate_metadata.py', '40_sensor_locations.py',
'40_visualize_raw.py', '45_projectors_background.py',
'50_artifact_correction_ssp.py', '50_configure_mne.py',
'50_epochs_to_data_frame.py', '55_setting_eeg_reference.py',
'59_head_positions.py', '60_make_fixed_length_epochs.py',
'60_maxwell_filtering_sss.py', '70_fnirs_processing.py',
# examples
'3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py',
'cluster_stats_evoked.py', 'compute_csd.py',
'compute_mne_inverse_epochs_in_label.py',
'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py',
'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py',
'custom_inverse_solver.py', 'cwt_sensor_connectivity.py',
'decoding_csp_eeg.py', 'decoding_csp_timefreq.py',
'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py',
'decoding_time_generalization_conditions.py',
'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py',
'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py',
'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py',
'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py',
'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py',
'fdr_stats_evoked.py', 'find_ref_artifacts.py',
'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py',
'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py',
'interpolate_bad_channels.py', 'label_activation_from_stc.py',
'label_from_stc.py', 'label_source_activations.py',
'left_cerebellum_volume_source.py', 'limo_data.py',
'linear_model_patterns.py', 'linear_regression_raw.py',
'meg_sensors.py', 'mixed_norm_inverse.py',
'mixed_source_space_connectivity.py', 'mixed_source_space_inverse.py',
'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py',
'mne_inverse_connectivity_spectrum.py',
'mne_inverse_envelope_correlation.py',
'mne_inverse_envelope_correlation_volume.py',
'mne_inverse_label_connectivity.py', 'mne_inverse_psi_visual.py',
'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py',
'movement_detection.py', 'multidict_reweighted_tfmxne.py',
'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py',
'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py',
'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py',
'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py',
'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py',
'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py',
'sensor_connectivity.py', 'sensor_noise_level.py',
'sensor_permutation_test.py', 'sensor_regression.py',
'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py',
'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py',
'source_label_time_frequency.py', 'source_power_spectrum.py',
'source_power_spectrum_opm.py', 'source_simulator.py',
'source_space_morphing.py', 'source_space_snr.py',
'source_space_time_frequency.py', 'ssd_spatial_filters.py',
'ssp_projs_sensitivity_map.py', 'temporal_whitening.py',
'time_frequency_erds.py', 'time_frequency_global_field_power.py',
'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py',
'topo_compare_conditions.py', 'topo_customized.py',
'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py',
'xhemi.py',
}
tu = 'auto_tutorials'
di = 'discussions'
sm = 'source-modeling'
fw = 'forward'
nv = 'inverse'
sn = 'stats-sensor-space'
sr = 'stats-source-space'
sd = 'sample-datasets'
ml = 'machine-learning'
tf = 'time-freq'
si = 'simulation'
custom_redirects = {
# Custom redirects (one HTML path to another, relative to outdir)
# can be added here as fr->to key->value mappings
f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html',
f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html',
f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html', # noqa E501
f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html',
f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html',
f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html',
f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html',
f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html',
f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html', # noqa E501
f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html', # noqa E501
f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html',
f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html',
f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html', # noqa E501
f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html', # noqa E501
f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html', # noqa E501
f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html', # noqa E501
f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html',
f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html', # noqa E501
f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html', # noqa E501
f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html',
f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html', # noqa E501
f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html',
f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html',
f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html', # noqa E501
f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html', # noqa E501
f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html',
f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html', # noqa E501
f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html',
f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html', # noqa E501
f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html', # noqa E501
f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html',
f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html', # noqa E501
f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html', # noqa E501
f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html', # noqa E501
f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html', # noqa E501
f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html',
f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html', # noqa E501
f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
}
def make_redirects(app, exception):
"""Make HTML redirects."""
# https://www.sphinx-doc.org/en/master/extdev/appapi.html
# Adapted from sphinxcontrib/redirects (BSD-2-Clause)
if not isinstance(app.builder, sphinx.builders.html.StandaloneHTMLBuilder):
return
logger = sphinx.util.logging.getLogger('mne')
TEMPLATE = """\
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url={to}">
<script type="text/javascript">
window.location.href = "{to}"
</script>
<title>Page Redirection</title>
</head>
<body>
If you are not redirected automatically, follow this <a href='{to}'>link</a>.
</body>
</html>""" # noqa: E501
sphinx_gallery_conf = app.config['sphinx_gallery_conf']
for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'],
sphinx_gallery_conf['gallery_dirs']):
root = os.path.abspath(os.path.join(app.srcdir, src_dir))
fnames = [os.path.join(os.path.relpath(dirpath, root), fname)
for dirpath, _, fnames in os.walk(root)
for fname in fnames
if fname in needed_plot_redirects]
# plot_ redirects
for fname in fnames:
dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname))
to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html'
fr_fname = f'plot_{to_fname}'
to_path = os.path.join(dirname, to_fname)
fr_path = os.path.join(dirname, fr_fname)
assert os.path.isfile(to_path), (fname, to_path)
with open(fr_path, 'w') as fid:
fid.write(TEMPLATE.format(to=to_fname))
logger.info(
f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}')
# custom redirects
for fr, to in custom_redirects.items():
to_path = os.path.join(app.outdir, to)
assert os.path.isfile(to_path), to
assert to_path.endswith('html'), to_path
fr_path = os.path.join(app.outdir, fr)
assert fr_path.endswith('html'), fr_path
# allow overwrite if existing file is just a redirect
if os.path.isfile(fr_path):
with open(fr_path, 'r') as fid:
for _ in range(8):
next(fid)
line = fid.readline()
assert 'Page Redirection' in line, line
# handle folders that no longer exist
if fr_path.split(os.path.sep)[-2] in (
'misc', 'discussions', 'source-modeling', 'sample-datasets'):
os.makedirs(os.path.dirname(fr_path), exist_ok=True)
# handle links to sibling folders
path_parts = to.split(os.path.sep)
path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):]
with open(fr_path, 'w') as fid:
fid.write(TEMPLATE.format(to=os.path.join(*path_parts)))
logger.info(
f'Added {len(custom_redirects):3d} HTML custom redirects')
# -- Connect our handlers to the main Sphinx app ---------------------------
def setup(app):
"""Set up the Sphinx app."""
app.connect('autodoc-process-docstring', append_attr_meth_examples)
if report_scraper is not None:
report_scraper.app = app
app.config.rst_prolog = prolog
app.connect('builder-inited', report_scraper.copyfiles)
app.connect('build-finished', make_redirects)
| 47.065158 | 300 | 0.636299 |
e2dc9c25d41243c7b937676d4e95517baca56c49 | 12,321 | py | Python | src/sagemaker/xgboost/estimator.py | matthewfollegot/sagemaker-python-sdk | 5182be3c147e8d765208a3548a55df99e3013748 | [
"Apache-2.0"
] | 1 | 2021-07-16T20:14:59.000Z | 2021-07-16T20:14:59.000Z | src/sagemaker/xgboost/estimator.py | matthewfollegot/sagemaker-python-sdk | 5182be3c147e8d765208a3548a55df99e3013748 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/xgboost/estimator.py | matthewfollegot/sagemaker-python-sdk | 5182be3c147e8d765208a3548a55df99e3013748 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import logging
from sagemaker.estimator import Framework
from sagemaker.fw_registry import default_framework_uri
from sagemaker.fw_utils import (
framework_name_from_image,
framework_version_from_tag,
python_deprecation_warning,
get_unsupported_framework_version_error,
UploadedCode,
)
from sagemaker.session import Session
from sagemaker.estimator import _TrainingJob
from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
from sagemaker.xgboost.defaults import XGBOOST_NAME, XGBOOST_SUPPORTED_VERSIONS
from sagemaker.xgboost.model import XGBoostModel
logger = logging.getLogger("sagemaker")
def get_xgboost_image_uri(region, framework_version, py_version="py3"):
"""Get XGBoost framework image URI"""
image_tag = "{}-{}-{}".format(framework_version, "cpu", py_version)
return default_framework_uri(XGBoost.__framework_name__, region, image_tag)
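# Illustrative sketch of the helper above (region and version are examples;
# the full registry URI depends on default_framework_uri):
#
#   get_xgboost_image_uri("us-west-2", "0.90-1")
#   # -> an ECR image URI whose tag is "0.90-1-cpu-py3"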
class XGBoost(Framework):
"""Handle end-to-end training and deployment of XGBoost booster training or training using
customer provided XGBoost entry point script."""
__framework_name__ = XGBOOST_NAME
def __init__(
self,
entry_point,
framework_version,
source_dir=None,
hyperparameters=None,
py_version="py3",
image_name=None,
**kwargs
):
"""
This ``Estimator`` executes an XGBoost based SageMaker Training Job.
The managed XGBoost environment is an Amazon-built Docker container that executes functions
defined in the supplied ``entry_point`` Python script.
Training is started by calling :meth:`~sagemaker.amazon.estimator.Framework.fit` on this
Estimator. After training is complete, calling
:meth:`~sagemaker.amazon.estimator.Framework.deploy` creates a hosted SageMaker endpoint
and returns an :class:`~sagemaker.amazon.xgboost.model.XGBoostPredictor` instance that
can be used to perform inference against the hosted model.
Technical documentation on preparing XGBoost scripts for SageMaker training and using the
XGBoost Estimator is available on the project home-page:
https://github.com/aws/sagemaker-python-sdk
Args:
entry_point (str): Path (absolute or relative) to the Python source file which should
be executed as the entry point to training.
This should be compatible with either Python 2.7 or Python 3.5.
framework_version (str): XGBoost version you want to use for executing your model
training code. List of supported versions
https://github.com/aws/sagemaker-python-sdk#xgboost-sagemaker-estimators
source_dir (str): Path (absolute or relative) to a directory with any other training
source code dependencies aside from the entry point file (default: None).
Structure within this directory is preserved when training on Amazon SageMaker.
hyperparameters (dict): Hyperparameters that will be used for training (default: None).
The hyperparameters are made accessible as a dict[str, str] to the training code
on SageMaker. For convenience, this accepts other types for keys and values, but
``str()`` will be called to convert them before training.
py_version (str): Python version you want to use for executing your model
training code (default: 'py3'). One of 'py2' or 'py3'.
image_name (str): If specified, the estimator will use this image for training and
hosting, instead of selecting the appropriate SageMaker official image
based on framework_version and py_version. It can be an ECR url or
dockerhub image and tag.
Examples:
123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0
custom-image:latest.
**kwargs: Additional kwargs passed to the
:class:`~sagemaker.estimator.Framework` constructor.
.. tip::
You can find additional parameters for initializing this class at
:class:`~sagemaker.estimator.Framework` and
:class:`~sagemaker.estimator.EstimatorBase`.
"""
super(XGBoost, self).__init__(
entry_point, source_dir, hyperparameters, image_name=image_name, **kwargs
)
if py_version == "py2":
logger.warning(python_deprecation_warning(self.__framework_name__))
self.py_version = py_version
if framework_version in XGBOOST_SUPPORTED_VERSIONS:
self.framework_version = framework_version
else:
raise ValueError(
get_unsupported_framework_version_error(
self.__framework_name__, framework_version, XGBOOST_SUPPORTED_VERSIONS
)
)
if image_name is None:
self.image_name = get_xgboost_image_uri(
self.sagemaker_session.boto_region_name, framework_version
)
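# Usage sketch of the fit/deploy flow described in the class docstring above
# (role ARN, script name and S3 paths are hypothetical placeholders):
#
#   estimator = XGBoost(entry_point="train.py",
#                       framework_version="0.90-1",
#                       role="arn:aws:iam::123456789012:role/SageMakerRole",
#                       train_instance_count=1,
#                       train_instance_type="ml.m5.xlarge")
#   estimator.fit({"train": "s3://my-bucket/xgb/train"})
#   predictor = estimator.deploy(initial_instance_count=1,
#                                instance_type="ml.m5.xlarge")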
def create_model(
self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT, **kwargs
):
"""Create a SageMaker ``XGBoostModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used
during transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the
inference server. If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the
model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
**kwargs: Passed to initialization of ``XGBoostModel``.
Returns:
sagemaker.xgboost.model.XGBoostModel: A SageMaker ``XGBoostModel`` object.
See :func:`~sagemaker.xgboost.model.XGBoostModel` for full details.
"""
role = role or self.role
# Remove unwanted entry_point kwarg
if "entry_point" in kwargs:
logger.debug("Removing unused entry_point argument: %s", str(kwargs["entry_point"]))
kwargs = {k: v for k, v in kwargs.items() if k != "entry_point"}
return XGBoostModel(
self.model_data,
role,
self.entry_point,
framework_version=self.framework_version,
source_dir=self._model_source_dir(),
enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
name=self._current_job_name,
container_log_level=self.container_log_level,
code_location=self.code_location,
py_version=self.py_version,
model_server_workers=model_server_workers,
image=self.image_name,
sagemaker_session=self.sagemaker_session,
vpc_config=self.get_vpc_config(vpc_config_override),
**kwargs
)
@classmethod
def attach(cls, training_job_name, sagemaker_session=None, model_channel_name="model"):
"""Attach to an existing training job.
Create an Estimator bound to an existing training job. Each subclass
is responsible for implementing
``_prepare_init_params_from_job_description()``, as this method delegates
the actual conversion of a training job description to the arguments
that the class constructor expects. After attaching, if the training job
has a Complete status, it can be ``deploy()`` ed to create a SageMaker
Endpoint and return a ``Predictor``.
If the training job is in progress, attach will block and display log
messages from the training job, until the training job completes.
Examples:
>>> my_estimator.fit(wait=False)
>>> training_job_name = my_estimator.latest_training_job.name
Later on:
>>> attached_estimator = Estimator.attach(training_job_name)
>>> attached_estimator.deploy()
Args:
training_job_name (str): The name of the training job to attach to.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
model_channel_name (str): Name of the channel where pre-trained
model data will be downloaded (default: 'model'). If no channel
with the same name exists in the training job, this option will
be ignored.
Returns:
Instance of the calling ``Estimator`` Class with the attached
training job.
"""
sagemaker_session = sagemaker_session or Session()
job_details = sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=training_job_name
)
init_params = cls._prepare_init_params_from_job_description(job_details, model_channel_name)
tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=job_details["TrainingJobArn"]
)["Tags"]
init_params.update(tags=tags)
estimator = cls(sagemaker_session=sagemaker_session, **init_params)
estimator.latest_training_job = _TrainingJob(
sagemaker_session=sagemaker_session, job_name=init_params["base_job_name"]
)
estimator._current_job_name = estimator.latest_training_job.name
estimator.latest_training_job.wait()
# pylint gets confused thinking that estimator is an EstimatorBase instance, but it actually
# is a Framework or any of its derived classes. We can safely ignore the no-member errors.
estimator.uploaded_code = UploadedCode(
estimator.source_dir, estimator.entry_point # pylint: disable=no-member
)
return estimator
@classmethod
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
Returns:
dictionary: The transformed init_params
"""
init_params = super(XGBoost, cls)._prepare_init_params_from_job_description(job_details)
image_name = init_params.pop("image")
framework, py_version, tag, _ = framework_name_from_image(image_name)
init_params["py_version"] = py_version
if framework and framework != cls.__framework_name__:
training_job_name = init_params["base_job_name"]
raise ValueError(
"Training job: {} didn't use image for requested framework".format(
training_job_name
)
)
init_params["framework_version"] = framework_version_from_tag(tag)
if not framework:
# If we were unable to parse the framework name from the image it is not one of our
# officially supported images, in this case just add the image to the init params.
init_params["image_name"] = image_name
return init_params
| 45.464945 | 100 | 0.671618 |
882e3a1502cfd5552b9f71412489ea91b777491c | 895 | py | Python | onnxmltools/convert/coreml/operator_converters/Normalizer.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | 3 | 2019-02-27T21:03:43.000Z | 2020-04-07T22:16:50.000Z | onnxmltools/convert/coreml/operator_converters/Normalizer.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | null | null | null | onnxmltools/convert/coreml/operator_converters/Normalizer.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | 2 | 2020-10-01T09:24:55.000Z | 2021-04-17T13:57:31.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ...common._registration import register_converter
def convert_normalizer(scope, operator, container):
op_type = 'Normalizer'
attrs = {'name': operator.full_name}
norms = ['MAX', 'L1', 'L2']
norm_type = operator.raw_operator.normalizer.normType
if norm_type in range(3):
attrs['norm'] = norms[norm_type]
else:
raise ValueError('Invalid norm type: ' + norm_type)
container.add_node(op_type, operator.input_full_names, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)
register_converter('normalizer', convert_normalizer)
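# Reading of the converter above (a sketch, not an official reference): CoreML
# normType 0/1/2 maps to ONNX norm 'MAX'/'L1'/'L2', so a model whose
# normalizer.normType == 2 produces an ai.onnx.ml Normalizer node with
# norm='L2'.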
| 37.291667 | 119 | 0.604469 |
f8fb238cdbdb8d733767962afe4cb8c0b6100912 | 8,532 | py | Python | frappe/desk/form/load.py | Mitchy25/frappe | 807dc0facb6dcdd9bd53557ce7d3c499beb1c26a | [
"MIT"
] | null | null | null | frappe/desk/form/load.py | Mitchy25/frappe | 807dc0facb6dcdd9bd53557ce7d3c499beb1c26a | [
"MIT"
] | null | null | null | frappe/desk/form/load.py | Mitchy25/frappe | 807dc0facb6dcdd9bd53557ce7d3c499beb1c26a | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.utils
import frappe.share
import frappe.defaults
import frappe.desk.form.meta
from frappe.model.utils.user_settings import get_user_settings
from frappe.permissions import get_doc_permissions
from frappe.desk.form.document_follow import is_document_followed
from frappe import _
from six.moves.urllib.parse import quote
@frappe.whitelist()
def getdoc(doctype, name, user=None):
"""
Loads a doclist for a given document. This method is called directly from the client.
Requries "doctype", "name" as form variables.
Will also call the "onload" method on the document.
"""
if not (doctype and name):
raise Exception('doctype and name required!')
if not name:
name = doctype
if not frappe.db.exists(doctype, name):
return []
try:
doc = frappe.get_doc(doctype, name)
run_onload(doc)
if not doc.has_permission("read"):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype + ' ' + name))
raise frappe.PermissionError(("read", doctype, name))
doc.apply_fieldlevel_read_permissions()
# add file list
doc.add_viewed()
get_docinfo(doc)
except Exception:
frappe.errprint(frappe.utils.get_traceback())
raise
doc.add_seen()
frappe.response.docs.append(doc)
@frappe.whitelist()
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
"""load doctype"""
docs = []
parent_dt = None
# with parent (called from report builder)
if with_parent:
parent_dt = frappe.model.meta.get_parent_dt(doctype)
if parent_dt:
docs = get_meta_bundle(parent_dt)
frappe.response['parent_dt'] = parent_dt
if not docs:
docs = get_meta_bundle(doctype)
frappe.response['user_settings'] = get_user_settings(parent_dt or doctype)
if cached_timestamp and docs[0].modified==cached_timestamp:
return "use_cache"
frappe.response.docs.extend(docs)
def get_meta_bundle(doctype):
bundle = [frappe.desk.form.meta.get_meta(doctype)]
for df in bundle[0].fields:
if df.fieldtype in frappe.model.table_fields:
bundle.append(frappe.desk.form.meta.get_meta(df.options, not frappe.conf.developer_mode))
return bundle
@frappe.whitelist()
def get_docinfo(doc=None, doctype=None, name=None):
if not doc:
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
frappe.response["docinfo"] = {
"attachments": get_attachments(doc.doctype, doc.name),
"communications": _get_communications(doc.doctype, doc.name),
'comments': get_comments(doc.doctype, doc.name),
'total_comments': len(json.loads(doc.get('_comments') or '[]')),
'versions': get_versions(doc),
"assignments": get_assignments(doc.doctype, doc.name),
"permissions": get_doc_permissions(doc),
"shared": frappe.share.get_users(doc.doctype, doc.name),
"views": get_view_logs(doc.doctype, doc.name),
"energy_point_logs": get_point_logs(doc.doctype, doc.name),
"milestones": get_milestones(doc.doctype, doc.name),
"is_document_followed": is_document_followed(doc.doctype, doc.name, frappe.session.user),
"tags": get_tags(doc.doctype, doc.name),
# "document_email": get_document_email(doc.doctype, doc.name)
}
def get_milestones(doctype, name):
return frappe.db.get_all('Milestone', fields = ['creation', 'owner', 'track_field', 'value'],
filters=dict(reference_type=doctype, reference_name=name))
def get_attachments(dt, dn):
return frappe.get_all("File", fields=["name", "file_name", "file_url", "is_private"],
filters = {"attached_to_name": dn, "attached_to_doctype": dt})
def get_versions(doc):
return frappe.get_all('Version', filters=dict(ref_doctype=doc.doctype, docname=doc.name),
fields=['name', 'owner', 'creation', 'data'], limit=10, order_by='creation desc')
@frappe.whitelist()
def get_communications(doctype, name, start=0, limit=20):
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
return _get_communications(doctype, name, start, limit)
def get_comments(doctype, name):
comments = frappe.get_all('Comment', fields = ['*'], filters = dict(
reference_doctype = doctype,
reference_name = name
))
# convert to markdown (legacy ?)
for c in comments:
if c.comment_type == 'Comment':
c.content = frappe.utils.markdown(c.content)
return comments
def get_point_logs(doctype, docname):
return frappe.db.get_all('Energy Point Log', filters={
'reference_doctype': doctype,
'reference_name': docname,
'type': ['!=', 'Review']
}, fields=['*'])
def _get_communications(doctype, name, start=0, limit=20):
communications = get_communication_data(doctype, name, start, limit)
for c in communications:
if c.communication_type=="Communication":
c.attachments = json.dumps(frappe.get_all("File",
fields=["file_url", "is_private"],
filters={"attached_to_doctype": "Communication",
"attached_to_name": c.name}
))
return communications
def get_communication_data(doctype, name, start=0, limit=20, after=None, fields=None,
group_by=None, as_dict=True):
'''Returns list of communications for a given document'''
if not fields:
fields = '''
C.name, C.communication_type, C.communication_medium,
C.comment_type, C.communication_date, C.content,
C.sender, C.sender_full_name, C.cc, C.bcc,
C.creation AS creation, C.subject, C.delivery_status,
C._liked_by, C.reference_doctype, C.reference_name,
C.read_by_recipient, C.rating
'''
conditions = ''
if after:
# find after a particular date
conditions += '''
AND C.creation > {0}
'''.format(after)
if doctype=='User':
conditions += '''
AND NOT (C.reference_doctype='User' AND C.communication_type='Communication')
'''
# communications linked to reference_doctype
part1 = '''
SELECT {fields}
FROM `tabCommunication` as C
WHERE C.communication_type IN ('Communication', 'Feedback')
AND (C.reference_doctype = %(doctype)s AND C.reference_name = %(name)s)
{conditions}
'''.format(fields=fields, conditions=conditions)
# communications linked in Timeline Links
part2 = '''
SELECT {fields}
FROM `tabCommunication` as C
INNER JOIN `tabCommunication Link` ON C.name=`tabCommunication Link`.parent
WHERE C.communication_type IN ('Communication', 'Feedback')
AND `tabCommunication Link`.link_doctype = %(doctype)s AND `tabCommunication Link`.link_name = %(name)s
{conditions}
'''.format(fields=fields, conditions=conditions)
communications = frappe.db.sql('''
SELECT *
FROM (({part1}) UNION ({part2})) AS combined
{group_by}
ORDER BY creation DESC
LIMIT %(limit)s
OFFSET %(start)s
'''.format(part1=part1, part2=part2, group_by=(group_by or '')), dict(
doctype=doctype,
name=name,
start=frappe.utils.cint(start),
limit=limit
), as_dict=as_dict)
return communications
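# Usage sketch of the query helper above (doctype and name are hypothetical):
#
#   comms = get_communication_data("ToDo", "TODO-00001", start=0, limit=5)
#   # -> newest-first rows linked either directly (reference_doctype) or via
#   # Timeline Links, exactly as combined by the UNION above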
def get_assignments(dt, dn):
cl = frappe.get_all("ToDo",
fields=['name', 'owner', 'description', 'status'],
limit= 5,
filters={
'reference_type': dt,
'reference_name': dn,
'status': ('!=', 'Cancelled'),
})
return cl
@frappe.whitelist()
def get_badge_info(doctypes, filters):
filters = json.loads(filters)
doctypes = json.loads(doctypes)
filters["docstatus"] = ["!=", 2]
out = {}
for doctype in doctypes:
out[doctype] = frappe.db.get_value(doctype, filters, "count(*)")
return out
def run_onload(doc):
doc.set("__onload", frappe._dict())
doc.run_method("onload")
def get_view_logs(doctype, docname):
""" get and return the latest view logs if available """
logs = []
if hasattr(frappe.get_meta(doctype), 'track_views') and frappe.get_meta(doctype).track_views:
view_logs = frappe.get_all("View Log", filters={
"reference_doctype": doctype,
"reference_name": docname,
}, fields=["name", "creation", "owner"], order_by="creation desc")
if view_logs:
logs = view_logs
return logs
def get_tags(doctype, name):
tags = [tag.tag for tag in frappe.get_all("Tag Link", filters={
"document_type": doctype,
"document_name": name
}, fields=["tag"])]
return ",".join(tags)
def get_document_email(doctype, name):
# import pdb
# pdb.set_trace()
email = get_automatic_email_link()
if not email:
return None
email = email.split("@")
return "{0}+{1}+{2}@{3}".format(email[0], quote(doctype), quote(name), email[1])
def get_automatic_email_link():
return frappe.db.get_value("Email Account", {"enable_incoming": 1, "enable_automatic_linking": 1}, "email_id")
| 30.14841 | 111 | 0.723043 |
b5212334dfc9a0ccf2e802366139f23c10cdbaea | 939 | py | Python | python/router/__init__.py | michaeldboyd/indy-agent | 3371a44144f45c7978b3454a440fe143eda7b1de | [
"Apache-2.0"
] | null | null | null | python/router/__init__.py | michaeldboyd/indy-agent | 3371a44144f45c7978b3454a440fe143eda7b1de | [
"Apache-2.0"
] | 2 | 2020-07-17T03:35:07.000Z | 2021-05-08T23:22:31.000Z | python/router/__init__.py | michaeldboyd/indy-agent | 3371a44144f45c7978b3454a440fe143eda7b1de | [
"Apache-2.0"
] | 2 | 2019-05-30T06:56:20.000Z | 2019-11-05T16:39:59.000Z | """ Module containing router implementations.
A base router is provided to show the basic interface of routers.
"""
from typing import Callable
from python_agent_utils.messages.message import Message
class BaseRouter:
""" Router Base Class. Provide basic interface for additional routers.
"""
async def register(self, msg_type: str, handler: Callable[[bytes], None]):
""" Register a callback for messages with a given type.
"""
raise NotImplementedError("`register` not implemented in BaseRouter!")
async def route(self, msg: Message):
""" Route a message to it's registered callback
"""
raise NotImplementedError("`route` not implemented in BaseRouter!")
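# A minimal concrete router built on this interface (illustrative sketch only,
# not part of the module; assumes Message exposes a `type` attribute):
#
# class DictRouter(BaseRouter):
#     def __init__(self):
#         self.routes = {}
#
#     async def register(self, msg_type, handler):
#         if msg_type in self.routes:
#             raise RouteAlreadyRegisteredException(msg_type)
#         self.routes[msg_type] = handler
#
#     async def route(self, msg):
#         await self.routes[msg.type](msg)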
class RouteAlreadyRegisteredException(Exception):
""" Route Already Registered Exception.
Raised by router.register
"""
pass
class UnparsableMessageFamilyException(Exception):
pass
| 27.617647 | 78 | 0.702875 |
ffe5ba68e1b704fd7b3f2ee9ce002364b28d0451 | 6,793 | py | Python | scripts/classcompare.py | SaierLaboratory/biotools | 0d17655ab2e60c4750a753dddd68517ec5ca6f1f | [
"BSD-3-Clause"
] | 2 | 2018-08-29T14:50:25.000Z | 2018-09-12T13:45:25.000Z | scripts/classcompare.py | SaierLaboratory/biotools | 0d17655ab2e60c4750a753dddd68517ec5ca6f1f | [
"BSD-3-Clause"
] | 5 | 2018-06-15T01:04:13.000Z | 2021-03-02T15:58:33.000Z | scripts/classcompare.py | SaierLaboratory/biotools | 0d17655ab2e60c4750a753dddd68517ec5ca6f1f | [
"BSD-3-Clause"
] | 2 | 2019-07-19T07:59:15.000Z | 2019-11-27T18:53:03.000Z | #!/usr/bin/env python
# ClassCompare - Compare entire TC classes to one another using P2.
# This program will skip members of the same superfamily.
import protocol2
import tcdb
import urllib
import scala
import os,re
import blast
import shutil
import pickle
import templates
#import resource
from Bio import SeqIO
from multiprocessing import cpu_count
from optparse import OptionParser
#resource.setrlimit(resource.RLIMIT_NOFILE, (10000,-1))
class Compare:
def __init__(self):
# Runtime Variables
self.subject = None
self.target = None
self.threads = cpu_count()
self.outdir = os.environ['HOME']+'/db/icc'
# Internal Matricies
self.familyrelations = []
self.names = None
self.pool = None
self.template = os.environ['CLASSCOMPARE_TEMPLATE']
self.fastahome = os.environ['HOME']+'/db/families'
if os.path.exists(self.fastahome) is False:
os.makedirs(self.fastahome)
if os.path.exists(self.outdir) is False:
os.makedirs(self.outdir)
def __call__(self):
# Check if this ICC has been done.
mylock = '%s/%s-%s.lock'%(self.outdir,self.subject,self.target)
#if os.path.exists(mylock) is True:
# return
# First load all interfamily relations
url = 'http://tcdb.org/cgi-bin/projectv/classcompare.py?a=%s&b=%s'%(self.subject,self.target)
results = urllib.urlopen(url)
[self.familyrelations.append(i.split('\t')) for i in results.read().split('\n')]
results.close()
self.familyrelations.pop()
self.names = tcdb.Names()
# Generate FASTA files & directories
self.generate_fastas()
# We start multithreading after the remote BLAST searches are done.
self.pool = scala.ThreadPool(self.threads)
for subject,target in self.familyrelations:
print subject,target
self.pool.queueTask(self.subprotocol,(subject,target))
self.pool.joinAll()
open(mylock,'wb')
#self.generate_report()
def generate_fastas(self):
if os.path.exists(self.fastahome) is False:
os.makedirs(self.fastahome)
families = []
for i in self.familyrelations:
families.extend(i)
families = list(set(families))
myblast=blast.tools()
for family in families:
myblast.gis = []
if os.path.exists(self.fastahome+'/'+family+'.faa'):
continue
print 'Blasting :: %s'%self.names.get_family_abr(family)
fastas = tcdb.define_family(family,True)
if fastas is False:
continue
for fasta in fastas:
myblast.blastp(str(fasta.seq))
# Done, write out this family
if bool(len(myblast.gis)) is False:
continue
myblast.build_raw_fasta()
shutil.copy(myblast.raw_fasta.name,self.fastahome+'/'+family+'.faa')
def subprotocol(self,data):
(subject,target) = data
thisdir = '%s/%s-%s'%(self.outdir,subject,target)
if os.path.exists(thisdir+'/gsat.matrix'):
return
# Set Protocol1 Variables
protocol = protocol2.Compare()
protocol.subject_file = self.fastahome+'/'+subject+'.faa'
protocol.target_file = self.fastahome+'/'+target+'.faa'
protocol.outdir = thisdir
protocol.subject_name = self.names.get_family_abr(subject)
protocol.target_name = self.names.get_family_abr(target)
if os.path.exists(protocol.subject_file) is False or os.path.exists(protocol.target_file) is False:
return
if os.path.exists(thisdir) is False:
os.makedirs(thisdir)
print "Comparison :: %s vs. %s // STARTED"%(subject,target)
protocol()
print "Comparison :: %s vs. %s // DONE!"%(subject,target)
def generate_report(self):
template = open(self.template,'r').read()
rows = re.search('{ROW}(.+?){/ROW}',template,re.DOTALL).groups()[0]
# Iterate through directories
dirs = os.listdir(self.outdir+'/p2/')
format = re.compile('(\d+\.[A-Z]\.\d+)-(\d+\.[A-Z]\.\d+)')
folders = [i for i in dirs if bool(format.search(i))]
myrows = []
for folder in folders:
matrix = self.outdir+'/p2/'+folder+'/gsat.matrix'
if os.path.exists(matrix):
try:
h = open(matrix,'r')
matrix = pickle.load(h)
h.close()
except:
print 'corrupt pickle. Running again...'
os.remove(matrix)
self.subprotocol(folder.split('-'))
matrix = pickle.load(open(matrix,'r'))
if bool(len(matrix)) is False:
continue
matrix.sort(key=lambda x:x['gsat_score'],reverse=True)
(subject,target) = folder.split('-')
row = rows[:]
row = row.replace('%SUBJECT%',subject)
row = row.replace('%TARGET%',target)
row = row.replace('%SUBJECTNAME%',self.names.get_family_abr(subject))
row = row.replace('%TARGETNAME%',self.names.get_family_abr(target))
row = row.replace('%SCORE%', str(matrix[0]['gsat_score']))
myrows.append((row,matrix[0]['gsat_score']))
myrows.sort(key=lambda x:x[1],reverse=True)
template = re.sub('{ROW}.+?{/ROW}',"\n".join([i[0] for i in myrows]),template,flags=re.DOTALL)
handle = open(self.outdir+'/report.html','wb')
handle.write(template)
print 'Done.'
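# Usage sketch (TC class identifiers are illustrative):
#
#   cc = Compare()
#   cc.subject = '1.A'
#   cc.target = '1.B'
#   cc()  # writes per-family P2 comparisons under ~/db/icc plus a .lock marker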
if __name__=='__main__':
opts = OptionParser(description='Compare entire TC Classes (2 Digit TCIDS). Just select two TC classes to compare. You can compare a class to itself and I will\
automatically ignore families that are already known to be homologous to save time.', version='1.0')
opts.add_option('-a',action='store',type='string',dest='subject_class',default=None,help='Subject class. Ex. 1.A')
opts.add_option('-b',action='store',type='string',dest='target_class',default=None,help='Subject class. Ex. 1.B')
opts.add_option('-o',action='store',type='string',dest='outdir',default=os.environ['HOME']+'/db/icc',help='Output Directory')
opts.add_option('--threads',action='store',type='int',dest='threads',default=cpu_count(),help='Threads to use')
(cli,args) = opts.parse_args()
(a,b,outdir)=(cli.subject_class,cli.target_class,cli.outdir)
if a is None or b is None:
opts.print_help()
quit()
cc = Compare()
cc.subject=a
cc.target=b
cc.outdir=outdir
cc.threads=cli.threads
cc()
| 41.420732 | 164 | 0.599441 |
a3c980cfd55ca3c23a5bb896a0a6efd3d994521c | 6,762 | py | Python | kubernetes_asyncio/client/models/v1_deployment.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_deployment.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_deployment.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1Deployment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1DeploymentSpec',
'status': 'V1DeploymentStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): # noqa: E501
"""V1Deployment - a model defined in OpenAPI""" # noqa: E501
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Deployment. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Deployment. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Deployment.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Deployment. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Deployment. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Deployment. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Deployment.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Deployment. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Deployment. # noqa: E501
:return: The metadata of this V1Deployment. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Deployment.
:param metadata: The metadata of this V1Deployment. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Deployment. # noqa: E501
:return: The spec of this V1Deployment. # noqa: E501
:rtype: V1DeploymentSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Deployment.
:param spec: The spec of this V1Deployment. # noqa: E501
:type: V1DeploymentSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Deployment. # noqa: E501
:return: The status of this V1Deployment. # noqa: E501
:rtype: V1DeploymentStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Deployment.
:param status: The status of this V1Deployment. # noqa: E501
:type: V1DeploymentStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Deployment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.597285 | 295 | 0.603668 |
773ffed8473b6f6a1153fd7bfce27f0c5a647ff1 | 2,004 | py | Python | astr2600/galileo.py | zkbt/cu-astr2600 | cfba78289f664a260d7e5cd7a1e45a6e9b06ec33 | [
"MIT"
] | 1 | 2020-07-14T00:09:43.000Z | 2020-07-14T00:09:43.000Z | astr2600/galileo.py | zkbt/cu-astr2600 | cfba78289f664a260d7e5cd7a1e45a6e9b06ec33 | [
"MIT"
] | null | null | null | astr2600/galileo.py | zkbt/cu-astr2600 | cfba78289f664a260d7e5cd7a1e45a6e9b06ec33 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def drawMoons(names, xpositions, xlim=[-500,500], labels=True):
'''
Draw a plot of the positions of moons relative to Jupiter.
This function requires two input arguments. They should
both be lists, and they should be the same size as each
other. They are:
moons -- a 1-dimensional list of moon names
xpositions -- a 1-dimensional list of moon positions (in arcsec)
For example:
names = ['Io', 'Europa', 'Ganymede', 'Callisto']
xpositions = [-20, 40, 80, -160]
drawMoons(names, xpositions)
(this should display a plot of the moon positions)
Optional keyword arguments
xlim = [-500,500]
This defines the x values of the left and
right edges of the plotting range to be included.
labels = True
If the function is called with labels=True,
then display the names of the moons.
If the function is called with labels=False,
then do not display the names of the moons.
'''
# since we're plotting only 1D positions, we make up y-values
ypositions = np.zeros_like(xpositions)
# we create a new figure, and set its size
plt.figure(figsize=(10,0.5))
# we plot the moons in their positions
plt.plot(xpositions, ypositions,
marker = '.',
linewidth=0,
color='black')
# if desired, we add text labels to all the moons
if labels:
for x, y, n in zip(xpositions, ypositions, names):
plt.text(x, y+0.5, n, ha='center', va='bottom', size=9)
# plot Jupiter in the center
plt.plot(0,0, marker='o', markersize=20, markerfacecolor='none', markeredgecolor='black')
# set the x and y limits of the plot
plt.xlim(*xlim)
plt.ylim(-1,1)
# turn off all axis labels (and the box around the plot)
plt.axis('off')
# make sure the plot shows to the screen
plt.show()
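# Example with the optional keyword arguments (positions in arcsec are
# illustrative):
#
#   drawMoons(['Io', 'Europa'], [-25, 60], xlim=[-200, 200], labels=False)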
| 30.363636 | 93 | 0.621257 |
a5c3ac1db1bd2ad5b98d68b132faf92132eea658 | 32,664 | py | Python | python/ccxt/async_support/bitmart.py | alok46/ccxt | ea94147e03e6974384727ead440a7cd2e80f7078 | [
"MIT"
] | 1 | 2019-10-30T13:32:54.000Z | 2019-10-30T13:32:54.000Z | python/ccxt/async_support/bitmart.py | alok46/ccxt | ea94147e03e6974384727ead440a7cd2e80f7078 | [
"MIT"
] | null | null | null | python/ccxt/async_support/bitmart.py | alok46/ccxt | ea94147e03e6974384727ead440a7cd2e80f7078 | [
"MIT"
] | 2 | 2019-12-02T10:32:52.000Z | 2020-03-22T00:58:58.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class bitmart (Exchange):
def describe(self):
return self.deep_extend(super(bitmart, self).describe(), {
'id': 'bitmart',
'name': 'BitMart',
'countries': ['US', 'CN', 'HK', 'KR'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'CORS': True,
'fetchMarkets': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchCurrencies': True,
'fetchOrderBook': True,
'fetchTrades': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchBalance': True,
'createOrder': True,
'cancelOrder': True,
'cancelAllOrders': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchCanceledOrders': True,
'fetchOrder': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/61835713-a2662f80-ae85-11e9-9d00-6442919701fd.jpg',
'api': 'https://openapi.bitmart.com',
'www': 'https://www.bitmart.com/',
'doc': 'https://github.com/bitmartexchange/bitmart-official-api-docs',
'referral': 'http://www.bitmart.com/?r=rQCFLh',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'token': {
'post': [
'authentication',
],
},
'public': {
'get': [
'currencies',
'ping',
'steps',
'symbols',
'symbols_details',
'symbols/{symbol}/kline',
'symbols/{symbol}/orders',
'symbols/{symbol}/trades',
'ticker',
'time',
],
},
'private': {
'get': [
'orders',
'orders/{id}',
'trades',
'wallet',
],
'post': [
'orders',
],
'delete': [
'orders',
'orders/{id}',
],
},
},
'timeframes': {
'1m': 1,
'3m': 3,
'5m': 5,
'15m': 15,
'30m': 30,
'45m': 45,
'1h': 60,
'2h': 120,
'3h': 180,
'4h': 240,
'1d': 1440,
'1w': 10080,
'1M': 43200,
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.002,
'maker': 0.001,
'tiers': {
'taker': [
[0, 0.20 / 100],
[10, 0.18 / 100],
[50, 0.16 / 100],
[250, 0.14 / 100],
[1000, 0.12 / 100],
[5000, 0.10 / 100],
[25000, 0.08 / 100],
[50000, 0.06 / 100],
],
'maker': [
[0, 0.1 / 100],
[10, 0.09 / 100],
[50, 0.08 / 100],
[250, 0.07 / 100],
[1000, 0.06 / 100],
[5000, 0.05 / 100],
[25000, 0.04 / 100],
[50000, 0.03 / 100],
],
},
},
},
'exceptions': {
'exact': {
'Place order error': InvalidOrder, # {"message":"Place order error"}
'Not found': OrderNotFound, # {"message":"Not found"}
'Visit too often, please try again later': DDoSProtection, # {"code":-30,"msg":"Visit too often, please try again later","subMsg":"","data":{}}
},
'broad': {
'Maximum price is': InvalidOrder, # {"message":"Maximum price is 0.112695"}
# {"message":"Required Integer parameter 'status' is not present"}
# {"message":"Required String parameter 'symbol' is not present"}
# {"message":"Required Integer parameter 'offset' is not present"}
# {"message":"Required Integer parameter 'limit' is not present"}
# {"message":"Required Long parameter 'from' is not present"}
# {"message":"Required Long parameter 'to' is not present"}
'is not present': BadRequest,
},
},
})
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
#
# {
# "server_time": 1527777538000
# }
#
return self.safe_integer(response, 'server_time')
async def sign_in(self, params={}):
message = self.apiKey + ':' + self.secret + ':' + self.uid
data = {
'grant_type': 'client_credentials',
'client_id': self.apiKey,
'client_secret': self.hmac(self.encode(message), self.encode(self.secret), hashlib.sha256),
}
response = await self.tokenPostAuthentication(self.extend(data, params))
accessToken = self.safe_string(response, 'access_token')
if not accessToken:
raise AuthenticationError(self.id + ' signIn() failed to authenticate. Access token missing from response.')
expiresIn = self.safe_integer(response, 'expires_in')
self.options['expires'] = self.sum(self.nonce(), expiresIn * 1000)
self.options['accessToken'] = accessToken
return response
async def fetch_markets(self, params={}):
markets = await self.publicGetSymbolsDetails(params)
#
# [
# {
# "id":"1SG_BTC",
# "base_currency":"1SG",
# "quote_currency":"BTC",
# "quote_increment":"0.1",
# "base_min_size":"0.1000000000",
# "base_max_size":"10000000.0000000000",
# "price_min_precision":4,
# "price_max_precision":6,
# "expiration":"NA"
# }
# ]
#
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
#
# https://github.com/bitmartexchange/bitmart-official-api-docs/blob/master/rest/public/symbols_details.md#response-details
# from the above API doc:
# quote_increment Minimum order price as well as the price increment
# price_min_precision Minimum price precision(digit) used to query price and kline
# price_max_precision Maximum price precision(digit) used to query price and kline
#
# the docs are wrong: https://github.com/ccxt/ccxt/issues/5612
#
quoteIncrement = self.safe_string(market, 'quote_increment')
amountPrecision = self.precision_from_string(quoteIncrement)
pricePrecision = self.safe_integer(market, 'price_max_precision')
precision = {
'amount': amountPrecision,
'price': pricePrecision,
}
limits = {
'amount': {
'min': self.safe_float(market, 'base_min_size'),
'max': self.safe_float(market, 'base_max_size'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
marketId = self.safe_string(ticker, 'symbol_id')
symbol = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
last = self.safe_float(ticker, 'current_price')
percentage = self.safe_float(ticker, 'fluctuation')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highest_price'),
'low': self.safe_float(ticker, 'lowest_price'),
'bid': self.safe_float(ticker, 'bid_1'),
'bidVolume': self.safe_float(ticker, 'bid_1_amount'),
'ask': self.safe_float(ticker, 'ask_1'),
'askVolume': self.safe_float(ticker, 'ask_1_amount'),
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage * 100 if percentage is not None else None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'base_volume'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = await self.publicGetTicker(self.extend(request, params))
#
# {
# "volume":"97487.38",
# "ask_1":"0.00148668",
# "base_volume":"144.59",
# "lowest_price":"0.00144362",
# "bid_1":"0.00148017",
# "highest_price":"0.00151000",
# "ask_1_amount":"92.03",
# "current_price":"0.00148230",
# "fluctuation":"+0.0227",
# "symbol_id":"XRP_ETH",
# "url":"https://www.bitmart.com/trade?symbol=XRP_ETH",
# "bid_1_amount":"134.78"
# }
#
return self.parse_ticker(response)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTicker(params)
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_currencies(self, params={}):
currencies = await self.publicGetCurrencies(params)
#
# [
# {
# "name":"CNY1",
# "withdraw_enabled":false,
# "id":"CNY1",
# "deposit_enabled":false
# }
# ]
#
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
currencyId = self.safe_string(currency, 'id')
code = self.safe_currency_code(currencyId)
name = self.safe_string(currency, 'name')
withdrawEnabled = self.safe_value(currency, 'withdraw_enabled')
depositEnabled = self.safe_value(currency, 'deposit_enabled')
active = withdrawEnabled and depositEnabled
result[code] = {
'id': currencyId,
'code': code,
'name': name,
'info': currency, # the original payload
'active': active,
'fee': None,
'precision': None,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {'min': None, 'max': None},
},
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
# 'precision': 4, # optional price precision / depth level whose range is defined in symbol details
}
response = await self.publicGetSymbolsSymbolOrders(self.extend(request, params))
return self.parse_order_book(response, None, 'buys', 'sells', 'price', 'amount')
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "amount":"2.29275119",
# "price":"0.021858",
# "count":"104.8930",
# "order_time":1563997286061,
# "type":"sell"
# }
#
# fetchMyTrades(private)
#
# {
# "symbol": "BMX_ETH",
# "amount": "1.0",
# "fees": "0.0005000000",
# "trade_id": 2734956,
# "price": "0.00013737",
# "active": True,
# "entrust_id": 5576623,
# "timestamp": 1545292334000
# }
#
id = self.safe_string(trade, 'trade_id')
timestamp = self.safe_integer_2(trade, 'timestamp', 'order_time')
type = None
side = self.safe_string_lower(trade, 'type')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = amount * price
orderId = self.safe_integer(trade, 'entrust_id')
marketId = self.safe_string(trade, 'symbol')
symbol = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is None:
if market is not None:
symbol = market['symbol']
feeCost = self.safe_float(trade, 'fees')
fee = None
if feeCost is not None:
# is it always quote, always base, or base-quote depending on the side?
feeCurrencyCode = None
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': None,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetSymbolsSymbolTrades(self.extend(request, params))
#
# [
# {
# "amount":"2.29275119",
# "price":"0.021858",
# "count":"104.8930",
# "order_time":1563997286061,
# "type":"sell"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'offset': 0, # current page, starts from 0
}
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# {
# "total_trades": 216,
# "total_pages": 22,
# "current_page": 0,
# "trades": [
# {
# "symbol": "BMX_ETH",
# "amount": "1.0",
# "fees": "0.0005000000",
# "trade_id": 2734956,
# "price": "0.00013737",
# "active": True,
# "entrust_id": 5576623,
# "timestamp": 1545292334000
# },
# ]
# }
#
trades = self.safe_value(response, 'trades', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'entrust_id': id,
}
return await self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
self.safe_integer(ohlcv, 'timestamp'),
self.safe_float(ohlcv, 'open_price'),
self.safe_float(ohlcv, 'highest_price'),
self.safe_float(ohlcv, 'lowest_price'),
self.safe_float(ohlcv, 'current_price'),
self.safe_float(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires either a `since` argument or a `limit` argument(or both)')
await self.load_markets()
market = self.market(symbol)
periodInSeconds = self.parse_timeframe(timeframe)
duration = periodInSeconds * limit * 1000
to = self.milliseconds()
if since is None:
since = to - duration
else:
to = self.sum(since, duration)
request = {
'symbol': market['id'],
'from': since, # start time of k-line data(in milliseconds, required)
'to': to, # end time of k-line data(in milliseconds, required)
'step': self.timeframes[timeframe], # steps of sampling(in minutes, default 1 minute, optional)
}
response = await self.publicGetSymbolsSymbolKline(self.extend(request, params))
#
# [
# {
# "timestamp":1525761000000,
# "open_price":"0.010130",
# "highest_price":"0.010130",
# "lowest_price":"0.010130",
# "current_price":"0.010130",
# "volume":"0.000000"
# }
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
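    # Worked example (illustrative, not part of the original file): with
    # timeframe='1m' and limit=100, duration = 60 * 100 * 1000 ms; when
    # `since` is omitted the window is [now - duration, now], otherwise
    # it is [since, since + duration].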
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privateGetWallet(params)
#
# [
# {
# "name":"Bitcoin",
# "available":"0.0000000000",
# "frozen":"0.0000000000",
# "id":"BTC"
# }
# ]
#
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'id')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available')
account['used'] = self.safe_float(balance, 'frozen')
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "entrust_id":1223181
# }
#
# cancelOrder
#
# {}
#
# fetchOrder, fetchOrdersByStatus, fetchOpenOrders, fetchClosedOrders
#
# {
# "entrust_id":1223181,
# "symbol":"BMX_ETH",
# "timestamp":1528060666000,
# "side":"buy",
# "price":"1.000000",
# "fees":"0.1",
# "original_amount":"1",
# "executed_amount":"1",
# "remaining_amount":"0",
# "status":3
# }
#
id = self.safe_string(order, 'entrust_id')
        timestamp = self.safe_integer(order, 'timestamp', self.milliseconds())
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = self.find_symbol(self.safe_string(order, 'symbol'), market)
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'original_amount')
cost = None
filled = self.safe_float(order, 'executed_amount')
remaining = self.safe_float(order, 'remaining_amount')
if amount is not None:
if remaining is not None:
if filled is None:
filled = amount - remaining
if filled is not None:
if remaining is None:
remaining = amount - filled
if cost is None:
if price is not None:
cost = price * filled
side = self.safe_string(order, 'side')
type = None
return {
'id': id,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
            'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def parse_order_status(self, status):
statuses = {
'0': 'all',
'1': 'open',
'2': 'open',
'3': 'closed',
'4': 'canceled',
'5': 'open',
'6': 'closed',
}
return self.safe_string(statuses, status, status)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': side.lower(),
'amount': float(self.amount_to_precision(symbol, amount)),
'price': float(self.price_to_precision(symbol, price)),
}
response = await self.privatePostOrders(self.extend(request, params))
#
# {
# "entrust_id":1223181
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
intId = int(id)
request = {
'id': intId,
'entrust_id': intId,
}
response = await self.privateDeleteOrdersId(self.extend(request, params))
#
# responds with an empty object {}
#
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a symbol argument')
side = self.safe_string(params, 'side')
if side is None:
raise ArgumentsRequired(self.id + " cancelAllOrders requires a `side` parameter('buy' or 'sell')")
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': side, # 'buy' or 'sell'
}
response = await self.privateDeleteOrders(self.extend(request, params))
#
# responds with an empty object {}
#
return response
async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByStatus requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 500 # default 500, max 1000
request = {
'symbol': market['id'],
'status': status,
'offset': 0, # current page, starts from 0
'limit': limit,
}
response = await self.privateGetOrders(self.extend(request, params))
#
# {
# "orders":[
# {
# "entrust_id":1223181,
# "symbol":"BMX_ETH",
# "timestamp":1528060666000,
# "side":"buy",
# "price":"1.000000",
# "fees":"0.1",
# "original_amount":"1",
# "executed_amount":"1",
# "remaining_amount":"0",
# "status":3
# }
# ],
# "total_pages":1,
# "total_orders":1,
# "current_page":0,
# }
#
orders = self.safe_value(response, 'orders', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# 5 = pending & partially filled orders
return await self.fetch_orders_by_status(5, symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# 3 = closed orders
return await self.fetch_orders_by_status(3, symbol, since, limit, params)
async def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):
# 4 = canceled orders
return await self.fetch_orders_by_status(4, symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrdersId(self.extend(request, params))
#
# {
# "entrust_id":1223181,
# "symbol":"BMX_ETH",
# "timestamp":1528060666000,
# "side":"buy",
# "price":"1.000000",
# "fees":"0.1",
# "original_amount":"1",
# "executed_amount":"1",
# "remaining_amount":"0",
# "status":3
# }
#
return self.parse_order(response)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'token':
self.check_required_credentials()
body = self.urlencode(query)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
nonce = self.nonce()
self.check_required_credentials()
token = self.safe_string(self.options, 'accessToken')
if token is None:
raise AuthenticationError(self.id + ' ' + path + ' endpoint requires an accessToken option or a prior call to signIn() method')
expires = self.safe_integer(self.options, 'expires')
if expires is not None:
if nonce >= expires:
raise AuthenticationError(self.id + ' accessToken expired, supply a new accessToken or call the signIn() method')
if query:
url += '?' + self.urlencode(query)
headers = {
'Content-Type': 'application/json',
'X-BM-TIMESTAMP': str(nonce),
'X-BM-AUTHORIZATION': 'Bearer ' + token,
}
if method != 'GET':
query = self.keysort(query)
body = self.json(query)
message = self.urlencode(query)
headers['X-BM-SIGNATURE'] = self.hmac(self.encode(message), self.encode(self.secret), hashlib.sha256)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"message":"Maximum price is 0.112695"}
# {"message":"Required Integer parameter 'status' is not present"}
# {"message":"Required String parameter 'symbol' is not present"}
# {"message":"Required Integer parameter 'offset' is not present"}
# {"message":"Required Integer parameter 'limit' is not present"}
# {"message":"Required Long parameter 'from' is not present"}
# {"message":"Required Long parameter 'to' is not present"}
# {"message":"Invalid status. status=6 not support any more, please use 3:deal_success orders, 4:cancelled orders"}
# {"message":"Not found"}
# {"message":"Place order error"}
#
feedback = self.id + ' ' + body
message = self.safe_string_2(response, 'message', 'msg')
if message is not None:
exact = self.exceptions['exact']
if message in exact:
raise exact[message](feedback)
broad = self.exceptions['broad']
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback) # unknown message
| 38.747331 | 164 | 0.477774 |
f83e842051d7720c1ed75e98c6569e905db61c58 | 601 | py | Python | library/core/schema.py | gabrielloliveira/library-ql | 65776af5037b03dfe899c978dbb13e6c473e6c92 | [
"MIT"
] | 2 | 2021-05-02T16:26:56.000Z | 2021-05-05T19:05:18.000Z | library/core/schema.py | gabrielloliveira/library-ql | 65776af5037b03dfe899c978dbb13e6c473e6c92 | [
"MIT"
] | null | null | null | library/core/schema.py | gabrielloliveira/library-ql | 65776af5037b03dfe899c978dbb13e6c473e6c92 | [
"MIT"
] | null | null | null | import graphene
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Book
class BookNode(DjangoObjectType):
class Meta:
model = Book
fields = ("uuid", "created_at", "updated_at", "name")
filter_fields = {
"name": ["exact", "icontains", "istartswith"],
"uuid": ["exact"],
}
interfaces = (relay.Node,)
class Query(graphene.ObjectType):
book = relay.Node.Field(BookNode)
all_books = DjangoFilterConnectionField(BookNode)
| 26.130435 | 62 | 0.673877 |
e0dbb35cdb1432a040ea41e02ab6ceb6622ed5ab | 3,090 | py | Python | edx_data_research/reporting/basic/ip_to_country.py | gopa1959/test | 3e224d0d86015b1e3e2da426e914aeb86c80d3c8 | [
"MIT"
] | null | null | null | edx_data_research/reporting/basic/ip_to_country.py | gopa1959/test | 3e224d0d86015b1e3e2da426e914aeb86c80d3c8 | [
"MIT"
] | null | null | null | edx_data_research/reporting/basic/ip_to_country.py | gopa1959/test | 3e224d0d86015b1e3e2da426e914aeb86c80d3c8 | [
"MIT"
] | null | null | null | '''
This module retrieve IP addresses for each student and maps their IP to a country
This is to determine the diversity of students who took a given course
The geoip module and GeoIP.dat file was used to map the IP address to a country
Each user may have multiple ips, so this module retrieves all the countries
mapped to those ips. Disclaimer: The accuracy of the IP to Country cannot be
determined as it is difficult to determine if the IP is an actual IP or a proxy IP
At the end of the analysis, the results are used to create a Pie Chart to
visualize the distribution
'''
import csv
import geoip
import os
from collections import defaultdict
def ip_to_country(edx_obj):
edx_obj.collections = ['tracking']
data_directory = os.path.abspath(os.path.dirname(__file__) + "/../data")
with open(os.path.join(data_directory, 'country_code_to_country.csv')) as csv_file:
reader = csv.reader(csv_file)
country_code_to_country = dict(reader)
cursor = edx_obj.collections['tracking'].find()
tracking = defaultdict(set)
for index, item in enumerate(cursor):
tracking[item['username']].add(item['ip'])
result = []
country_set = set()
for key, value_set in tracking.iteritems():
for value in value_set:
try:
country_code = geoip.country(value, dbname=os.path.join(
data_directory, 'GeoIP.dat'))
country = country_code_to_country[country_code]
if not key:
key = 'anonymous'
result.append([key, value, country_code, country])
elif (key, country) not in country_set:
country_set.add((key,country))
result.append([key, value, country_code, country])
except KeyError:
# IMPORTANT
# The following code for an exception are hardcoded for those
# IPs which do have a mapping to a country code but they were
# not available in GeoIP.dat (most probably because it was
# not updated). People using this script can either report this
# code (under except) and or additional conditions IP addresses
# which cannot be mapped to a country code stored in GeoIP.dat
if value == '41.79.120.29':
country = country_code_to_country['SS']
if not key:
key = 'anonymous'
result.append([key, value, 'SS',
country_code_to_country['SS']])
elif (key, country) not in country_set:
country_set.add((key, country))
result.append([key, value, 'SS',
country_code_to_country['SS']])
edx_obj.generate_csv(result, ['Username', 'IP Address', 'Country Code',
'Country'], edx_obj.report_name(edx_obj.db.name,
__name__.split('.')[-1]))
| 46.818182 | 87 | 0.59288 |
27273305d6a729de5868306fbc02e0fe222791f8 | 960 | py | Python | DAFNI-wrappers/base-image/dockerFiles/settings.py | roseDickinson/nismod2 | 5c571b055a6f5dc26bdc2bc8950b9cf1c9202fca | [
"MIT"
] | 5 | 2019-12-18T15:27:28.000Z | 2020-10-03T09:10:23.000Z | DAFNI-wrappers/base-image/dockerFiles/settings.py | roseDickinson/nismod2 | 5c571b055a6f5dc26bdc2bc8950b9cf1c9202fca | [
"MIT"
] | 11 | 2019-12-18T14:53:03.000Z | 2021-12-30T07:26:46.000Z | DAFNI-wrappers/base-image/dockerFiles/settings.py | roseDickinson/nismod2 | 5c571b055a6f5dc26bdc2bc8950b9cf1c9202fca | [
"MIT"
] | 3 | 2019-12-18T15:27:39.000Z | 2022-02-25T07:04:06.000Z | """
The settings module which points the settings at environment variables.
"""
import os
from pathlib import Path
# Static settings
INPUT_PATH = Path("/data/inputs/")
OUTPUT_PATH = Path("/data/outputs/")
CONFIG_FILE = Path("/code/script_config.ini")
RESULTS_PATH = Path("/code/nismod2/results/")
NISMOD_PATH = Path("/code/nismod2/")
NISMOD_DATA_PATH = NISMOD_PATH.joinpath("data/")
TRANSPORT_ADDITIONAL_OUTPUTS_PATH = NISMOD_DATA_PATH.joinpath("transport/gb/output/")
NISMOD_SCENARIOS_PATH = NISMOD_DATA_PATH.joinpath("scenarios/")
NISMOD_SOCIO_ECONOMIC_PATH = NISMOD_SCENARIOS_PATH.joinpath("socio-economic/")
# User settable settings
model_to_run = os.getenv("model_to_run", "error")
part_of_sos_model = os.getenv("part_of_sos_model", False)
sector_model = os.getenv("sector_model", "error")
timestep = os.getenv("timestep", "")
transforms_to_run = os.getenv("transforms_to_run", "[]")
use_generated_scenario = os.getenv("use_generated_scenario", False)
| 36.923077 | 85 | 0.779167 |
d236664b390f83d9798389cfd0fa72d2e359b280 | 16,138 | py | Python | uvcgan/base/networks.py | LS4GAN/uvcgan | 376439ae2a9be684ff279ddf634fe137aadc5df5 | [
"BSD-2-Clause"
] | 20 | 2022-02-14T22:36:19.000Z | 2022-03-29T06:31:30.000Z | uvcgan/base/networks.py | LS4GAN/uvcgan | 376439ae2a9be684ff279ddf634fe137aadc5df5 | [
"BSD-2-Clause"
] | 1 | 2022-03-09T17:23:30.000Z | 2022-03-09T17:23:30.000Z | uvcgan/base/networks.py | LS4GAN/uvcgan | 376439ae2a9be684ff279ddf634fe137aadc5df5 | [
"BSD-2-Clause"
] | 3 | 2022-02-14T22:36:41.000Z | 2022-03-20T12:53:29.000Z | # LICENSE
# This file was extracted from
# https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
# Please see `uvcgan/base/LICENSE` for copyright attribution and LICENSE
# pylint: disable=line-too-long
# pylint: disable=redefined-builtin
# pylint: disable=too-many-arguments
# pylint: disable=unidiomatic-typecheck
# pylint: disable=super-with-arguments
import functools
import torch
from torch import nn
class Identity(nn.Module):
# pylint: disable=no-self-use
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = lambda _features : Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
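# Illustrative sketch (the helper below is not part of the original file):
# the factory returns a callable that still needs the channel count, so
# model builders can decide the layer width later.
def _norm_layer_example():
    norm = get_norm_layer('instance')
    layer = norm(64)                    # nn.InstanceNorm2d over 64 channels
    noop = get_norm_layer('none')(64)   # Identity, i.e. a no-op
    return layer, noop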
def join_args(a, b):
return { **a, **b }
def select_base_generator(model, **kwargs):
default_args = dict(norm = 'instance', use_dropout = False, ngf = 64)
kwargs = join_args(default_args, kwargs)
if model == 'resnet_9blocks':
return ResnetGenerator(n_blocks = 9, **kwargs)
if model == 'resnet_6blocks':
return ResnetGenerator(n_blocks = 6, **kwargs)
if model == 'unet_128':
return UnetGenerator(num_downs = 7, **kwargs)
if model == 'unet_256':
return UnetGenerator(num_downs = 8, **kwargs)
raise ValueError("Unknown generator: %s" % model)
def select_base_discriminator(model, **kwargs):
default_args = dict(norm = 'instance', ndf = 64)
kwargs = join_args(default_args, kwargs)
if model == 'basic':
return NLayerDiscriminator(n_layers = 3, **kwargs)
if model == 'n_layers':
return NLayerDiscriminator(**kwargs)
if model == 'pixel':
return PixelDiscriminator(**kwargs)
raise ValueError("Unknown discriminator: %s" % model)
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, image_shape, ngf=64, norm = 'batch', use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert n_blocks >= 0
super().__init__()
norm_layer = get_norm_layer(norm_type = norm)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(image_shape[0], ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, image_shape[0], kernel_size=7, padding=0)]
if image_shape[0] == 3:
model.append(nn.Sigmoid())
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super().__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
# pylint: disable=no-self-use
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, image_shape, num_downs, ngf=64, norm = 'batch', use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,
image of size 128x128 will become of size 1x1 # at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
norm_layer = get_norm_layer(norm_type=norm)
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for _i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(image_shape[0], ngf, input_nc=image_shape[0], submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
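# Illustrative smoke test (not part of the original file; assumes torch is
# available): num_downs must match the input resolution, e.g. 2**7 == 128
# for the 'unet_128' variant.
def _unet_shape_check():
    net = UnetGenerator(image_shape=(1, 128, 128), num_downs=7)
    x = torch.zeros(2, 1, 128, 128)
    return net(x).shape  # torch.Size([2, 1, 128, 128])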
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
# pylint: disable=too-many-locals
super().__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv ]
if outer_nc == 3:
up.append(nn.Sigmoid())
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, image_shape, ndf=64, n_layers=3, norm='batch', max_mult=8):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
norm_layer = get_norm_layer(norm_type = norm)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(image_shape[0], ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, max_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, max_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, image_shape, ndf=64, norm='batch'):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
norm_layer = get_norm_layer(norm_type=norm)
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(image_shape[0], ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
| 40.446115 | 178 | 0.603297 |
21618d722a35f8b86ebdeb0330a685455daf8fca | 5,275 | py | Python | keyring.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | 2 | 2015-05-25T10:47:30.000Z | 2017-12-12T18:15:00.000Z | keyring.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | null | null | null | keyring.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | null | null | null | from go import bytes, encoding.hex, fmt, io.ioutil, regexp
from go.crypto import rand, sha256
import conv, dh, flag, sym
from "github.com/strickyak/rye/contrib" import data
"The global RingDict."
RingDict = {}
Ring = {}
RingFilename = flag.String('ring', '', 'name of keyring file')
RingSecret = flag.String('secret', '', 'passphrase of keyring file')
RE_HEX = regexp.MustCompile('^[0-9a-f]+$').FindString
RE_BASE64 = regexp.MustCompile('^[-A-Za-z0-9_=]+$').FindString
SavedSecretCipher = None
def LazySecretCipher():
if not SavedSecretCipher:
SavedSecretCipher = sym.Cipher(sha256.Sum256(RingSecret.X))
return SavedSecretCipher
def SealWithSecret(plain):
return conv.Encode64(LazySecretCipher().Seal(plain, 'WithSecret'))
def OpenWithSecret(encrypted):
plain, serial = LazySecretCipher().Open(conv.Decode64(encrypted))
must serial == 'WithSecret'
return plain
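# Illustrative round-trip property (not part of the original file): with the
# same --secret flag set, OpenWithSecret(SealWithSecret(p)) == p, since both
# helpers share the one lazily built cipher keyed by sha256(--secret).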
class DhKey:
def __init__(d):
.id = d['id']
.pub = d['pub']
must .pub
.pub = str(.pub)
.sec = d.get('sec') if d.get('sec') else OpenWithSecret(d['Xsec']) if d.get('Xsec') else None
.sec = None if .sec is None else str(.sec)
must RE_BASE64(.pub)
.o_pub = dh.Big(.pub) # big.Int
if .sec:
must RE_BASE64(.sec)
.o_sec = dh.DhSecret(group=dh.GROUP, pub=.o_pub, sec=dh.Big(.sec)) # DhSecret
class SymKey:
def __init__(d):
.id = d['id']
.num = d['num']
.sym = d['sym'] if d.get('sym') else conv.EncodeHex(OpenWithSecret(d['Xsym']))
must .sym
.sym = str(.sym)
must RE_HEX(.sym)
must len(.sym)== sym.KEY_HEX_LEN
.b_sym = sym.DecodeHex(.sym)
class WebKey:
def __init__(d):
.id = d['id']
.num = d['num']
.xor = d['xor'] if d.get('xor') else OpenWithSecret(d['Xxor'])
.base = d['base']
must RE_HEX(.xor)
must len(.xor)== sym.KEY_HEX_LEN
.b_xor = sym.DecodeHex(.xor)
class HashedPw:
def __init__(d):
.id = d['id']
.doubleHash = d['doubleHash'] if d.get('doubleHash') else OpenWithSecret(d['XdoubleHash'])
.salt = d['salt']
def CompileDicts(d):
"""CompileDicts the dict of dicts into the dict of objects."""
ring = {}
for k, v in d.items():
switch v['TYPE']:
case 'pw/doubleHash':
ring[k] = HashedPw(v)
case 'dh':
ring[k] = DhKey(v)
case 'sym/aes256':
ring[k] = SymKey(v)
case 'web/aes256':
ring[k] = WebKey(v)
default:
raise 'Unknown Type', v['TYPE']
return ring
def Load(filename=None):
"""Load the ring dict from the file."""
global RingDict, Ring
filename = filename if filename else RingFilename.X
s = str(ioutil.ReadFile(filename)).strip()
if s:
RingDict = data.Eval(s)
else:
RingDict = {}
Ring = CompileDicts(RingDict)
def Save(filename=None):
"""Save the ring dict to the file."""
filename = filename if filename else RingFilename.X
for k, v in RingDict.items():
print '=====', k, '====='
print v
print '====='
Ring = CompileDicts(RingDict)
ioutil.WriteFile(filename, data.PrettyPrint(RingDict), 0600)
def main(args):
args = flag.Munch(args)
Load(None) # TODO local bug
if args:
cmd = args.pop(0)
say cmd, args
switch cmd:
case "nop":
pass
case "mkpw":
key_id = args.pop(0)
pw = args.pop(0)
hex_salt = conv.EncodeHex(sym.RandomKey())
RingDict[key_id] = dict(
id=key_id,
TYPE='pw/doubleHash',
salt=hex_salt,
doubleHash=None if RingSecret.X else conv.DoubleHash(pw, hex_salt),
XdoubleHash=SealWithSecret(conv.DoubleHash(pw, hex_salt)) if RingSecret.X else None,
)
case "mkdh":
key_id = args.pop(0)
must not args
pair = dh.Forge(group=dh.GROUP)
RingDict[key_id] = dict(
id=key_id,
TYPE='dh',
pub=dh.String(pair.pub),
sec=None if RingSecret.X else dh.String(pair.sec),
Xsec=SealWithSecret(dh.String(pair.sec)) if RingSecret.X else None,
)
case "mksym":
key_id = args.pop(0)
key_num = args.pop(0)
must not args
bb = sym.RandomKey()
say RingDict
say key_id, key_num, bb
RingDict[key_id] = dict(
num=key_num,
id=key_id,
TYPE='sym/aes256',
sym=None if RingSecret.X else conv.EncodeHex(bb),
Xsym=SealWithSecret(bb) if RingSecret.X else None,
)
say RingDict
case "mkweb":
# First find the base key.
# Then use "mkweb" to make the derived web key.
# Do not publish the base key.
key_id = args.pop(0)
key_num = args.pop(0)
key_base = args.pop(0)
pw = args.pop(0)
must not args
base = Ring[key_base]
basekey = base.b_sym
say basekey
pwhash = sha256.Sum256(pw)
say pwhash
assert len(pwhash) == sym.KEY_BYT_LEN
xorkey = basekey ^ pwhash
say xorkey
assert len(xorkey) == sym.KEY_BYT_LEN
RingDict[key_id] = dict(
num=key_num,
id=key_id,
base=key_base,
TYPE='web/aes256',
xor=None if RingSecret.X else conv.EncodeHex(xorkey),
Xxor=SealWithSecret(xorkey) if RingSecret.X else None,
)
default:
raise 'Unknown command:', cmd
Save(None)
| 26.77665 | 97 | 0.603791 |
2cbc1179ee9458695bdbf1b161ff5dcd4f2aeeea | 1,866 | py | Python | tests/contract_tests/KT1VYsVfmobT7rsMVivvZ4J8i3bPiqz12NaH/test_vysvfm_mint.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 98 | 2019-02-07T16:33:38.000Z | 2022-03-31T15:53:41.000Z | tests/contract_tests/KT1VYsVfmobT7rsMVivvZ4J8i3bPiqz12NaH/test_vysvfm_mint.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 152 | 2019-05-20T16:38:56.000Z | 2022-03-30T14:24:38.000Z | tests/contract_tests/KT1VYsVfmobT7rsMVivvZ4J8i3bPiqz12NaH/test_vysvfm_mint.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 34 | 2019-07-25T12:03:51.000Z | 2021-11-11T22:23:38.000Z | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseVYSVFM(TestCase):
@classmethod
def setUpClass(cls):
        with open(join(dirname(__file__), '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        with open(join(dirname(__file__), 'mint.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = 'mint'
cls.operation = operation
# cls.maxDiff = None
def test_parameters_vysvfm(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
# pprint(py_obj)
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_vysvfm(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
extended_storage = storage.merge_lazy_diff(lazy_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
# pprint(py_obj)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
| 38.875 | 112 | 0.720793 |
5dd4690029de33ad33881f86ffad75ef3ba57a82 | 668 | py | Python | algorithm/jinshan.py | sheldonl3/python_learning | cfd82e90c3d4104339164c082a8964405b13dd2f | [
"MIT"
] | null | null | null | algorithm/jinshan.py | sheldonl3/python_learning | cfd82e90c3d4104339164c082a8964405b13dd2f | [
"MIT"
] | null | null | null | algorithm/jinshan.py | sheldonl3/python_learning | cfd82e90c3d4104339164c082a8964405b13dd2f | [
"MIT"
] | null | null | null | def work(N):
if N == 0:
return
if N == 1:
print(1)
return
    if N == 2:
        tmp = [1, 1, 1]
        print(1)
        for each in tmp:
            print(each, end=' ')
        print()
        return
    # header rows of the pyramid: "1" then "1 1 1"
    tmp = [1, 1, 1]
    print(1)
    for each in tmp:
        print(each, end=' ')
    print()
a, b = 1, 1
lis = [1, 1]
for _ in range(N - 2):
a, b = b, a + b
lis.append(b)
l = len(lis)
left = lis[:l - 1]
left.reverse()
tmp = lis + left
# print(tmp)
for each in tmp:
print(each, end=' ')
print()
return
if __name__ == "__main__":
s = int(input())
work(s)
| 18.555556 | 32 | 0.396707 |
beaa0944277ea689df3fabaf124b79e81a196807 | 1,303 | py | Python | scripts/workers/id2tax.py | hurwitzlab/taxoner-patric | f761f4e0bcaae92340538ed4af94f39a37da44ed | [
"Apache-2.0"
] | null | null | null | scripts/workers/id2tax.py | hurwitzlab/taxoner-patric | f761f4e0bcaae92340538ed4af94f39a37da44ed | [
"Apache-2.0"
] | null | null | null | scripts/workers/id2tax.py | hurwitzlab/taxoner-patric | f761f4e0bcaae92340538ed4af94f39a37da44ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
from collections import defaultdict
if __name__ == "__main__":
parser = \
argparse.ArgumentParser(description="Script to count taxas.")
parser.add_argument("-f", "--file", action="store", \
help="File to count taxas from")
parser.add_argument("-o1", "--out1", action="store", \
help="Name for the id_to_taxa file")
parser.add_argument("-o2", "--out2", action="store", \
help="Named for taxa_to_count file")
args = vars(parser.parse_args())
file_in = open(args["file"],"r")
file_out1 = open(args["out1"],"w")
file_out2 = open(args["out2"],"w")
id_to_taxa={}
taxa_to_count=defaultdict(int)
for line in file_in:
        line = line.rstrip('\n')
if (':' in line):
cols=line.split('\t')
#read_id tax_id gi_id alignment_score match_start match_end
#HWI-ST885:65:C07WUACXX:2:1101:10000:12455 245014 291561105 1.700 487123 487223
read_id=cols[0]
taxa=cols[1]
id_to_taxa.update({read_id:taxa})
taxa_to_count[taxa] += 1
for read_id in id_to_taxa:
file_out1.write(read_id + "\t" + id_to_taxa[read_id] + "\n")
for taxa in taxa_to_count:
file_out2.write(taxa + "\t" + str(taxa_to_count[taxa]) + "\n")
file_in.close()
file_out1.close()
file_out2.close()
| 28.326087 | 87 | 0.646969 |
c8a6c33dc32b150b6139b1eddf5670e19dccea60 | 1,499 | py | Python | gapid_tests/command_buffer_tests/DispatchAndDispatchIndirect_test/DispatchAndDispatchIndirect_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 55 | 2017-06-20T13:54:31.000Z | 2022-02-08T23:58:11.000Z | gapid_tests/command_buffer_tests/DispatchAndDispatchIndirect_test/DispatchAndDispatchIndirect_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 53 | 2017-06-15T19:23:07.000Z | 2022-03-30T19:56:30.000Z | gapid_tests/command_buffer_tests/DispatchAndDispatchIndirect_test/DispatchAndDispatchIndirect_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 42 | 2017-06-15T19:05:40.000Z | 2022-03-30T14:15:25.000Z | # Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, GapitTest
from gapit_test_framework import require, require_equal, require_not_equal
@gapit_test("DispatchAndDispatchIndirect_test")
class DispatchGroupCountX512(GapitTest):
def expect(self):
dispatch = require(
self.next_call_of("vkCmdDispatch"))
require_not_equal(0, dispatch.int_commandBuffer)
require_equal(512, dispatch.int_groupCountX)
require_equal(1, dispatch.int_groupCountY)
require_equal(1, dispatch.int_groupCountZ)
@gapit_test("DispatchAndDispatchIndirect_test")
class DispatchIndirectWithOffsetZero(GapitTest):
def expect(self):
dispatch_indirect = require(
self.next_call_of("vkCmdDispatchIndirect"))
require_not_equal(0, dispatch_indirect.int_commandBuffer)
require_not_equal(0, dispatch_indirect.int_buffer)
require_equal(0, dispatch_indirect.int_offset)
| 38.435897 | 74 | 0.762508 |
17b91ebb149ff794e7ab9e3c0e500dbf74dd6ee0 | 2,039 | py | Python | src/common.py | nagisc007/pythoncitest | 5200f1e8ae2969ffb733fb2239e9bbb7e700e836 | [
"MIT"
] | null | null | null | src/common.py | nagisc007/pythoncitest | 5200f1e8ae2969ffb733fb2239e9bbb7e700e836 | [
"MIT"
] | null | null | null | src/common.py | nagisc007/pythoncitest | 5200f1e8ae2969ffb733fb2239e9bbb7e700e836 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from acttypes import ActType
class Act(object):
""" basic action class.
"""
def __init__(self, subject, act_type, action, description, with_subject=False):
self.action = action
self.act_type = act_type
self.description = description
self.subject = subject
self.with_subject = with_subject
class Title(Act):
""" For title act
"""
def __init__(self, title, desc=""):
super().__init__(self, ActType.SYMBOL, title, desc)
class Chapter(Act):
""" For chapter start act
"""
def __init__(self, chapter_title, desc=""):
super().__init__(self, ActType.SYMBOL, chapter_title, desc)
class Description(Act):
""" Nothing subject description act.
"""
def __init__(self, act, desc=""):
super().__init__(self, ActType.DESC, act, desc)
class Person(object):
""" basic character class.
"""
def __init__(self, name, age, sex, job):
self.name = name
self.age = age
self.sex = sex
self.job = job
def tell(self, what, desc="", with_subject=False):
''' For dialogue
'''
return Act(self, ActType.TELL, "「{}」".format(what), desc, with_subject)
class Stage(object):
""" basic stage class.
"""
def __init__(self, name, act):
self.name = name
self.act = act
def description(self, desc=""):
return Act(self, ActType.DESC, self.act, desc)
class Item(object):
""" basic item class.
"""
def __init__(self, name, act):
self.name = name
self.act = act
def description(self, desc=""):
return Act(self, ActType.DESC, self.act, desc)
class DayTime(object):
""" basic day and time class.
"""
def __init__(self, act, mon=0, day=0, year=0, hour=0):
self.year = year
self.mon = mon
self.day = day
self.hour = hour
self.act = act
def description(self, desc=""):
return Act(self, ActType.DESC, self.act, desc)
| 23.436782 | 83 | 0.583619 |
56c5e622b288ece0a397f1d8b7ae5f738d537404 | 5,218 | py | Python | pymatgen/analysis/tests/test_functional_groups.py | cajfisher/pymatgen | 286c304e38102d567723a71f733e0c304b72035d | [
"MIT"
] | 6 | 2015-02-06T08:27:09.000Z | 2021-02-28T14:42:52.000Z | pymatgen/analysis/tests/test_functional_groups.py | cajfisher/pymatgen | 286c304e38102d567723a71f733e0c304b72035d | [
"MIT"
] | null | null | null | pymatgen/analysis/tests/test_functional_groups.py | cajfisher/pymatgen | 286c304e38102d567723a71f733e0c304b72035d | [
"MIT"
] | 3 | 2018-10-17T19:08:09.000Z | 2021-12-02T20:26:58.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import warnings
from pymatgen.core.structure import Molecule
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.analysis.local_env import OpenBabelNN
from pymatgen.analysis.functional_groups import FunctionalGroupExtractor
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "functional_groups")
try:
from openbabel import openbabel as ob
from openbabel import pybel as pb
import networkx as nx
except ImportError:
pb = None
ob = None
nx = None
__author__ = "Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Evan Spotte-Smith"
__email__ = "ewcspottesmith@lbl.gov"
__status__ = "Beta"
__date__ = "July 2018"
__credit__ = "Peiyuan Yu"
@unittest.skipIf(not (pb and ob and nx), "OpenBabel or NetworkX not present. Skipping...")
class FunctionalGroupExtractorTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.file = os.path.join(test_dir, "func_group_test.mol")
self.mol = Molecule.from_file(self.file)
self.strat = OpenBabelNN()
self.mg = MoleculeGraph.with_local_env_strategy(self.mol, self.strat)
self.extractor = FunctionalGroupExtractor(self.mg)
def tearDown(self):
warnings.simplefilter("default")
del self.extractor
del self.mg
del self.strat
del self.mol
del self.file
def test_init(self):
# Ensure that instantiation is equivalent for all valid input types
extractor_str = FunctionalGroupExtractor(self.file)
extractor_mol = FunctionalGroupExtractor(self.mol)
extractor_mg = self.extractor
self.assertEqual(extractor_str.molgraph, extractor_mol.molgraph)
self.assertEqual(extractor_str.molgraph, extractor_mg.molgraph)
self.assertEqual(extractor_str.species, extractor_mol.species)
self.assertEqual(extractor_str.species, extractor_mg.species)
# Test optimization
file_no_h = os.path.join(test_dir, "func_group_test_no_h.mol")
extractor_no_h = FunctionalGroupExtractor(file_no_h, optimize=True)
self.assertEqual(len(extractor_no_h.molecule), len(extractor_mol.molecule))
self.assertEqual(extractor_no_h.species, extractor_mol.species)
def test_get_heteroatoms(self):
heteroatoms = self.extractor.get_heteroatoms()
hetero_species = [self.extractor.species[x] for x in heteroatoms]
self.assertEqual(len(heteroatoms), 3)
self.assertEqual(sorted(hetero_species), ["N", "O", "O"])
# Test with limitation
hetero_no_o = self.extractor.get_heteroatoms(elements=["N"])
self.assertEqual(len(hetero_no_o), 1)
def test_get_special_carbon(self):
special_cs = self.extractor.get_special_carbon()
self.assertEqual(len(special_cs), 4)
# Test with limitation
special_cs_no_o = self.extractor.get_special_carbon(elements=["N"])
self.assertEqual(len(special_cs_no_o), 2)
def test_link_marked_atoms(self):
heteroatoms = self.extractor.get_heteroatoms()
special_cs = self.extractor.get_special_carbon()
link = self.extractor.link_marked_atoms(heteroatoms.union(special_cs))
self.assertEqual(len(link), 1)
self.assertEqual(len(link[0]), 9)
# Exclude Oxygen-related functional groups
heteroatoms_no_o = self.extractor.get_heteroatoms(elements=["N"])
special_cs_no_o = self.extractor.get_special_carbon(elements=["N"])
all_marked = heteroatoms_no_o.union(special_cs_no_o)
link_no_o = self.extractor.link_marked_atoms(all_marked)
self.assertEqual(len(link_no_o), 2)
def test_get_basic_functional_groups(self):
basics = self.extractor.get_basic_functional_groups()
# Molecule has one methyl group which will be caught.
self.assertEqual(len(basics), 1)
self.assertEqual(len(basics[0]), 4)
basics_no_methyl = self.extractor.get_basic_functional_groups(func_groups=["phenyl"])
self.assertEqual(len(basics_no_methyl), 0)
def test_get_all_functional_groups(self):
heteroatoms = self.extractor.get_heteroatoms()
special_cs = self.extractor.get_special_carbon()
link = self.extractor.link_marked_atoms(heteroatoms.union(special_cs))
basics = self.extractor.get_basic_functional_groups()
all_func = self.extractor.get_all_functional_groups()
self.assertEqual(len(all_func), (len(link) + len(basics)))
self.assertEqual(sorted(all_func), sorted(link + basics))
def test_categorize_functional_groups(self):
all_func = self.extractor.get_all_functional_groups()
categorized = self.extractor.categorize_functional_groups(all_func)
self.assertTrue("O=C1C=CC(=O)[N]1" in categorized.keys())
self.assertTrue("[CH3]" in categorized.keys())
total_count = sum([c["count"] for c in categorized.values()])
self.assertEqual(total_count, 2)
if __name__ == "__main__":
unittest.main()
| 35.739726 | 93 | 0.705059 |
ff04b4092d8af4712e3274368555c4e9dfe42cb4 | 1,039 | py | Python | ymir/backend/src/pymir-app/app/schemas/runtime.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | 1 | 2022-01-12T03:12:47.000Z | 2022-01-12T03:12:47.000Z | ymir/backend/src/pymir-app/app/schemas/runtime.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/pymir-app/app/schemas/runtime.py | under-chaos/ymir | 83e98186b23429e6027b187cdade247f5f93e5de | [
"Apache-2.0"
] | null | null | null | import enum
import json
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, EmailStr, Field, validator
from app.schemas.common import (
Common,
DateTimeModelMixin,
IdModelMixin,
IsDeletedModelMixin,
)
class RuntimeType(enum.IntEnum):
unknown = 0
training = 1
mining = 2
inference = 100
class RuntimeBase(BaseModel):
name: str
hash: str
type: RuntimeType
config: str
class RuntimeCreate(RuntimeBase):
...
class RuntimeUpdate(BaseModel):
name: str
hash: str
config: str
class RuntimeInDBBase(
IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, RuntimeBase
):
class Config:
orm_mode = True
class Runtime(RuntimeInDBBase):
config: str
@validator("config")
def unravel_config(cls, v: str, values: Dict[str, Any]) -> Dict[str, Any]:
if not v:
return {}
return json.loads(v)
class RuntimeOut(Common):
result: Union[Runtime, List[Runtime]]
| 17.610169 | 78 | 0.676612 |
0b2fc3669689c275dbab8a7fdf5e46a1407b4dab | 8,039 | py | Python | stratlib/bollinger_band.py | llmofang/backtest | 9a2b10ab604defcca7f4b7d4111526130d79192b | [
"Apache-2.0"
] | null | null | null | stratlib/bollinger_band.py | llmofang/backtest | 9a2b10ab604defcca7f4b7d4111526130d79192b | [
"Apache-2.0"
] | null | null | null | stratlib/bollinger_band.py | llmofang/backtest | 9a2b10ab604defcca7f4b7d4111526130d79192b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 03 13:06:56 2015
@author: Eunice
"""
if __name__ == '__main__':
import sys
sys.path.append("..")
from pyalgotrade import bar
from pyalgotrade import plotter
# The modules above are only used for testing
from pyalgotrade.broker.fillstrategy import DefaultStrategy
from pyalgotrade.broker.backtesting import TradePercentage
from pyalgotrade import strategy
from pyalgotrade.technical import macd
import matplotlib.pyplot as plt
from pyalgotrade.dataseries import SequenceDataSeries
from pyalgotrade.broker.backtesting import TradePercentage
from pyalgotrade.technical import bollinger
from pyalgotrade.technical import cross
from pyalgotrade.strategy.position import ShortPosition
from pyalgotrade.strategy.position import LongPosition
import numpy as np
from datetime import datetime
class bollinger_band(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, bollingerlength, numStdDev):
strategy.BacktestingStrategy.__init__(self, feed)
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.0008))
self.__instrument = instrument
self.__bollingerlength = int(bollingerlength)
self.__close = feed[instrument].getPriceDataSeries()
numStdDev = float(numStdDev) / 10
self.__longPos = []
self.__shortPos = []
self.__bollinger = bollinger.BollingerBands(self.__close, self.__bollingerlength, int(numStdDev))
self.__UpperBand = self.__bollinger.getUpperBand()
self.__LowerBand = self.__bollinger.getLowerBand()
self.__position = SequenceDataSeries()
        self.__circ = 5
        self.__lastLongPos = None
        self.__lastShortPos = None
        self.__barNum = 0
def getPrice(self):
return self.__prices
def getBollingerBands(self):
return self.__bollinger
def testCon(self):
# record position
#######################################################################
if len(self.__longPos) > 0:
self.__position.append(len(self.__longPos))
        if len(self.__shortPos) > 0:
            # print(self.__shortPos.getShares())
            self.__position.append(-len(self.__shortPos))
        elif len(self.__longPos) == 0 and len(self.__shortPos) == 0:
            self.__position.append(0)
            # print(0)
def getTest(self):
return self.__position
def onBars(self, bars):
bar = bars[self.__instrument]
lower = self.__bollinger.getLowerBand()[-1]
upper = self.__bollinger.getUpperBand()[-1]
        self.__barNum += 1
# print(self.getActivePositions())
if lower is None or upper is None:
return
self.testCon()
if len(self.__longPos)> 0:
if self.exitLongSignal():
for pos in self.__longPos:
pos.exitMarket()
elif len(self.__shortPos)>0:
if self.exitShortSignal():
for pos in self.__shortPos:
pos.exitMarket()
if self.enterLongSignal():
for i in range(self.enterLongSignal()):
shares = int(self.getBroker().getEquity() * 0.2 / bars[self.__instrument].getPrice())
# self.__longPos.
self.__longPos.append(self.enterLong(self.__instrument, shares))
self.__lastLongPos=self.__barNum
#print('long'+str(shares))
elif self.enterShortSignal():
for i in range(self.enterShortSignal()):
shares = int(self.getBroker().getEquity() * 0.2 / bars[self.__instrument].getPrice())
self.__shortPos.append(self.enterShort(self.__instrument, shares))
self.__lastShortPos=self.__barNum
#print('short'+str(shares))
    def enterLongSignal(self):
if self.__lastLongPos is not None:
if self.__barNum-self.__lastLongPos<60:
return 0
if self.__UpperBand[-1-self.__circ] is None:
return 0
m1 = 0
for i in range(self.__circ):
if self.__close[-i-1] <= self.__LowerBand[-i-2]:
m1 += 1
if m1 >= self.__circ-1 and cross.cross_above(self.__close,self.__LowerBand)>0:
return 1
else:
return 0
    def enterShortSignal(self):
if self.__lastShortPos is not None:
if self.__barNum-self.__lastShortPos<60:
return 0
if self.__UpperBand[-1-self.__circ] is None:
return 0
m1 = 0
for i in range(self.__circ):
if self.__close[-i-1] >= self.__UpperBand[-i-2]:
m1 += 1
if m1 >= self.__circ-1 and cross.cross_below(self.__close,self.__UpperBand)>0:
return 1
else:
return 0
    def exitLongSignal(self):
if self.__UpperBand[-1-self.__circ] is None:
return False
m1 = 0
for i in range(self.__circ):
if self.__close[-i-1] >= self.__UpperBand[-i-2]:
m1 += 1
if m1 >= self.__circ-1 and cross.cross_below(self.__close,self.__UpperBand)>0:
return True
else:
return False
def exitShortSignal(self):
if self.__UpperBand[-1-self.__circ] is None:
return False
m1 = 0
for i in range(self.__circ):
if self.__close[-i-1] <= self.__LowerBand[-i-2]:
m1 += 1
if m1 >= self.__circ-1 and cross.cross_above(self.__close,self.__LowerBand)>0:
return True
else:
return False
def onEnterCanceled(self, position):
if self.__longPos[-1] == position:
del self.__longPos[-1]
            self.__lastLongPos = None  # assignment, not comparison
elif self.__shortPos[-1] == position:
del self.__shortPos[-1]
            self.__lastShortPos = None  # assignment, not comparison
else:
assert(False)
def onEnterOK(self,position):
pass
def onExitOk(self, position):
if isinstance(position,LongPosition):
self.__longPos = []
elif isinstance(position,ShortPosition):
self.__shortPos = []
else:
assert(False)
def onExitCanceled(self, position):
position.exitMarket()
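# Illustrative sketch (not part of the original strategy): the entry/exit
# rules above fire when the close has spent ~`circ` consecutive bars beyond a
# band and then crosses back through it. A minimal version of the counting
# step on plain sequences, for readers:
def _bars_beyond_band(closes, band, circ=5):
    # Count how many of the last `circ` closes sit at/below the matching band
    # value (the strategy compares close[-i-1] against band[-i-2]).
    return sum(1 for i in range(circ) if closes[-i - 1] <= band[-i - 2])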
if __name__ == "__main__":
strat = bollinger_band
instrument = '600016'
market = 'SZ'
date = '2016-03-11'
#toDate ='20160101'
frequency = bar.Frequency.SECOND
paras = [450, 28]
plot = True
#############################################path set ############################33
if frequency == bar.Frequency.MINUTE:
path = "..\\histdata\\min\\"
elif frequency == bar.Frequency.DAY:
path = "..\\histdata\\day\\"
elif frequency == bar.Frequency.SECOND:
path = "..\\histdata\\tick\\"
filepath = path +'stock_'+ instrument + "_"+date+".csv"
#############################################don't change ############################33
from pyalgotrade.barfeed.csvfeed import GenericBarFeed
barfeed = GenericBarFeed(frequency)
barfeed.setDateTimeFormat('%Y-%m-%d %H:%M:%S')
barfeed.addBarsFromCSV(instrument, filepath)
strat = strat(barfeed, instrument, *paras)
if plot:
plt = plotter.StrategyPlotter(strat)
plt.getInstrumentSubplot(instrument).addDataSeries("upper", strat.getBollingerBands().getUpperBand())
plt.getInstrumentSubplot(instrument).addDataSeries("middle", strat.getBollingerBands().getMiddleBand())
plt.getInstrumentSubplot(instrument).addDataSeries("lower", strat.getBollingerBands().getLowerBand())
position = strat.getTest()
plt.getOrCreateSubplot("position").addDataSeries("position", position)
#position = strat.getTest()
#plt.getOrCreateSubplot("position").addDataSeries("position", position)
#plt.getOrCreateSubplot("macd").addDataSeries('macd2',strat.getMACD2())
strat.run()
if plot:
plt.plot()
| 31.03861 | 111 | 0.602563 |
42dc4c4a1948c5b69eef5be773a009923fccc907 | 3,511 | py | Python | tests/test_writers.py | pierrepo/buildH | 4870ffc4fb41deec2c8af5ba5b589795bbb99563 | ["BSD-3-Clause"] | 13 | 2020-12-21T14:43:08.000Z | 2022-02-16T03:35:14.000Z | tests/test_writers.py | pierrepo/buildH | 4870ffc4fb41deec2c8af5ba5b589795bbb99563 | ["BSD-3-Clause"] | 137 | 2019-08-14T17:00:15.000Z | 2022-03-29T14:48:38.000Z | tests/test_writers.py | pierrepo/buildH | 4870ffc4fb41deec2c8af5ba5b589795bbb99563 | ["BSD-3-Clause"] | 6 | 2019-08-30T08:00:22.000Z | 2022-01-19T20:06:24.000Z | """
Unit tests for buildH_calcOP.
Test functions from module writers.
"""
import pathlib
import filecmp
import pytest
import pandas as pd
import MDAnalysis as mda
from buildh import lipids
from buildh import init_dics
from buildh import core
from buildh import writers
DIR_DATA = "test_data"
PATH_ROOT_DATA = pathlib.Path(__file__).parent / DIR_DATA
# Ignore some MDAnalysis warnings for this test file
pytestmark = pytest.mark.filterwarnings('ignore::UserWarning')
def test_pandas2pdb():
"""Test for pandasdf2pdb()."""
# Create a dummy dataframe
rows = [[1, "C1", "POPC", 1, 34.42, 46.94, 26.31],
[2, "H211", "POPC", 1, 1.00, 2.00, 3.00]
]
df = pd.DataFrame(rows, columns=["atnum", "atname", "resname", "resnum",
"x", "y", "z"])
ref_pdb_lines = ('ATOM 1 C1 POPC 1 34.420 46.940 26.310 1.00 0.00 C\n'
'ATOM 2 H211 POPC 1 1.000 2.000 3.000 1.00 0.00 H\n')
assert writers.pandasdf2pdb(df) == ref_pdb_lines
class TestWriters():
"""Test class for the writing functions."""
# path for the Berger POPC files
PATH_DATA = PATH_ROOT_DATA / "Berger_POPC"
# Method called once per class.
def setup_class(self):
"""Initialize all data."""
lipids_tops = lipids.read_lipids_topH([lipids.PATH_JSON/"Berger_POPC.json"])
# Input parameters
self.pdb = self.PATH_DATA / "10POPC.pdb"
self.defop = self.PATH_DATA / "OP_def_BergerPOPC.def"
self.dic_lipid = lipids_tops["Berger_POPC"]
self.begin = 0
self.end = 1
# attributes
self.universe_woH = mda.Universe(str(self.pdb))
self.dic_atname2genericname = init_dics.make_dic_atname2genericname(self.defop)
self.dic_OP, self.dic_corresp_numres_index_dic_OP = init_dics.init_dic_OP(self.universe_woH,
self.dic_atname2genericname,
self.dic_lipid['resname'])
self.dic_Cname2Hnames = init_dics.make_dic_Cname2Hnames(self.dic_OP)
# Compute the order parameter
core.fast_build_all_Hs_calc_OP(self.universe_woH,self.begin, self.end,
self.dic_OP, self.dic_lipid, self.dic_Cname2Hnames)
def test_write_OP(self, tmpdir):
"""Test for write_OP().
Parameters
----------
tmpdir: function
pytest callback which return a unique directory.
"""
#Write results of self.dic_OP
test_file = tmpdir / "test.out"
writers.write_OP(test_file, self.dic_atname2genericname,
self.dic_OP, self.dic_lipid['resname'])
ref_file = self.PATH_DATA / "ref_10POPC.out"
assert filecmp.cmp(test_file, ref_file)
def test_write_OP_alternate(self, tmpdir):
"""Test for write_OP_alternate().
Parameters
----------
tmpdir: function
pytest callback which return a unique directory.
"""
#Write results of self.dic_OP
test_file = tmpdir / "test.out"
writers.write_OP_alternate(test_file, self.universe_woH,
self.dic_OP, self.dic_lipid['resname'])
ref_file = self.PATH_DATA / "ref_10POPC.alternate.out"
assert filecmp.cmp(test_file, ref_file)
| 33.759615 | 110 | 0.592139 |
b0917532ad3b598f5ceb117d90e19bc22c73f8d4 | 3,849 | py | Python | plot_functions/reflectivity_series_plot.py | Silvermoonsniper/Normal-Moveout-for-Seismic-Signals-for-Distributed-Velocity-Estimation-in-Subsurfaces | ff1144d485a0a48f380289ffc4a8637f7d384b8e | ["Unlicense"] | 1 | 2021-12-13T15:05:52.000Z | 2021-12-13T15:05:52.000Z | plot_functions/reflectivity_series_plot.py | Silvermoonsniper/Normal-Moveout-for-Seismic-Signals-for-Distributed-Velocity-Estimation-in-Subsurfaces | ff1144d485a0a48f380289ffc4a8637f7d384b8e | ["Unlicense"] | null | null | null | plot_functions/reflectivity_series_plot.py | Silvermoonsniper/Normal-Moveout-for-Seismic-Signals-for-Distributed-Velocity-Estimation-in-Subsurfaces | ff1144d485a0a48f380289ffc4a8637f7d384b8e | ["Unlicense"] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
#function to plot reflectivity series
#input args:
# LS:reflectivity series
# time_array: time series for plot
# time_array1: time series for plotting ground truth series
# newdesired: ground truth series
# trace_number: indice of receivers
# r: indice of receiver in receier array
# peak_source: double shifted number of datapoints in the ricker wavelet
def reflectivity_plot(LS,time_array,time_array1,newdesired,trace_number,r,peak_source):
plot_flag = 1
if plot_flag == 1:
plt.plot(time_array[0:len(time_array)], LS[0:len(time_array)])
newdesired[r]=np.pad(newdesired[r], (int(0.5*peak_source), 0), 'constant')
plt.plot(time_array1[0:50000], newdesired[r][0: 50000])
# plt.plot(time_array[0:4000],original_trac[0:4000])
# plt.plot(time_array[0:4000],post_processing[r][0:4000])
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
# plt.legend(['original seismic trace'])
plt.legend(['$\hat{\mathbf{\mu}}_d$', 'ground truth'])
        # plt.title('The original seismic trace')
        plt.title('The Comparison between reflectivity series and ground truth reflection')
plt.show()
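# Hedged usage sketch with synthetic data (all values below are made up, not
# taken from the seismic pipeline). Lengths are chosen so the padded
# ground-truth series matches the 2000-sample time axis:
# pad = int(0.5 * peak_source) = 50, and 1950 + 50 = 2000.
def _demo_reflectivity_plot():
    t = np.linspace(0.0, 1.0, 2000)
    series = np.exp(-((t - 0.4) ** 2) / 1e-4)  # dummy reflectivity estimate
    truth = {0: np.zeros(1950)}
    truth[0][750] = 1.0  # one synthetic reflection event
    reflectivity_plot(series, t, t, truth, trace_number=0, r=0, peak_source=100)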
#function to plot multiple reflectivity series
#input args:
# time_array: time series for plotting
# reflectivity: reflectivity series at multiple receivers
# time_array1: time series for plotting ground truth series
# newdesired: ground truth series
# peak_source: double shifted number of datapoints in the ricker wavelet
def multiple_reflectivity_plot(time_array,reflectivity,time_array1,newdesired,peak_source):
# plot result
plot_flag = 1
if plot_flag == 1:
newdesired[5] = np.pad(newdesired[5], (int(0.5 * peak_source), 0), 'constant')
newdesired[15] = np.pad(newdesired[15], (int(0.5 * peak_source), 0), 'constant')
newdesired[25] = np.pad(newdesired[25], (int(0.5 * peak_source), 0), 'constant')
newdesired[35] = np.pad(newdesired[35], (int(0.5 * peak_source), 0), 'constant')
newdesired[45] = np.pad(newdesired[45], (int(0.5 * peak_source), 0), 'constant')
newdesired[55] = np.pad(newdesired[55], (int(0.5 * peak_source), 0), 'constant')
        f, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, sharey=True, figsize=(10, 10))
ax1.plot(time_array[0:len(time_array)], reflectivity[0][0:len(time_array)], label='Source')
ax2.plot(time_array[0:len(time_array)], reflectivity[1][0:len(time_array)], label='Source')
ax3.plot(time_array[0:len(time_array)], reflectivity[2][0:len(time_array)], label='Source')
ax4.plot(time_array[0:len(time_array)], reflectivity[3][0:len(time_array)], label='Source')
ax5.plot(time_array[0:len(time_array)], reflectivity[4][0:len(time_array)], label='Source')
ax6.plot(time_array[0:len(time_array)], reflectivity[5][0:len(time_array)], label='Source')
# overlay wih ground truth
ax1.plot(time_array1[0:50000], newdesired[5][0: 50000], label='Source')
ax2.plot(time_array1[0:50000], newdesired[15][0: 50000], label='Source')
ax3.plot(time_array1[0:50000], newdesired[25][0: 50000], label='Source')
ax4.plot(time_array1[0:50000], newdesired[35][0: 50000], label='Source')
ax5.plot(time_array1[0:50000], newdesired[45][0: 50000], label='Source')
ax6.plot(time_array1[0:50000], newdesired[55][0: 50000], label='Source')
ax1.set_title('Reflectivity Series for multiple receivers')
ax6.set_xlabel('time (s)')
ax3.set_ylabel('normalized amplitude')
plt.tight_layout()
f.legend(['$\hat{\mathbf{\mu}}_d$', 'ground truth'])
        plt.show()
| 58.318182 | 100 | 0.654716 |
5fa3b817a0eed75ef218850815dac1a31b7639e4 | 39,765 | py | Python | pyoptsparse/pyALPSO/alpso_ext.py | robfalck/pyoptsparse | c99f4bfe8961492d0a1879f9ecff7a2fbb3c8c1d | ["CNRI-Python"] | null | null | null | pyoptsparse/pyALPSO/alpso_ext.py | robfalck/pyoptsparse | c99f4bfe8961492d0a1879f9ecff7a2fbb3c8c1d | ["CNRI-Python"] | null | null | null | pyoptsparse/pyALPSO/alpso_ext.py | robfalck/pyoptsparse | c99f4bfe8961492d0a1879f9ecff7a2fbb3c8c1d | ["CNRI-Python"] | null | null | null | #!/usr/bin/env python
"""
alpso - Python Version of the Augmented Lagrangian Particle Swarm Optimizer
alpso if a global optimizer which solves problems of the form:
min F(x)
subject to: Gi(x) = 0, i = 1(1)ME
Gj(x) <= 0, j = ME+1(1)M
xLB <= x <= xUB
Copyright (c) 2006-2011 by Dr. Ruben E. Perez and Mr. Peter Jansen
All rights reserved. Not to be used for commercial purposes.
Revision: 1.7 $Date: 04/03/2010 21:00$
Developers:
-----------
- Dr. Ruben E. Perez (RP)
- Mr. Peter W. Jansen (PJ)
History
-------
v. 1.2 - Initial Code Development in C (RP, 2006)
v. 1.3 - Initial Migration from C to Python (RP,PJ 2008)
v. 1.4 - Migration Bug Fixes (PJ, 2008)
v. 1.5 - Migration Running Version (PJ, 2008)
v. 1.6 - Migration Scaling Option (PJ, 2008)
       - Removed lambda convergence criteria (PJ, 2009)
- Added Multiple x0s Input Functionality (PJ, 2010)
v. 1.7 - Added Neighbourhood option (PJ, 2010)
"""
__version__ = '$Revision: $'
'''
To Do:
- Migrate Inner Loop Printing Option
- Add Other Inertia and Velocity Updates to Inner Loop
- Fix Neighbourhood best from Lagrangian value
'''
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys, random, time
import pdb
from math import floor
# =============================================================================
# External Python modules
# =============================================================================
import numpy
from ..pyOpt_error import pyOptSparseWarning
# =============================================================================
# Extension modules
# =============================================================================
# =============================================================================
# Misc Definitions
# =============================================================================
inf = 10.E+20 # define a value for infinity
# =============================================================================
eps = 1.0 # define a value for machine precision
while (eps / 2.0 + 1.0) > 1.0:
eps /= 2.0
eps *= 2.0
# eps = math.ldexp(1,-52)
# ==============================================================================
# alpso function
# ==============================================================================
def alpso(dimensions, constraints, neqcons, xtype, x0, xmin, xmax, swarmsize, nhn,
nhm, maxOutIter, maxInnIter, minInnIter, stopCriteria, stopIters, etol,
itol, rtol, atol, dtol, prtOutIter, prtInnIter, r0, vinit, vmax, c1, c2, w1, w2,
ns, nf, vcrazy, fileout, filename, logfile, hstfile, rseed, scale, nhs, objfunc):
"""
Python Version of the Augmented Lagrangian Particle Swarm Optimizer
Documentation last updated: April. 29, 2008 - Ruben E. Perez
"""
#
if x0 != []:
if isinstance(x0, list):
x0 = numpy.array(x0)
elif not isinstance(x0, numpy.ndarray):
pyOptSparseWarning(("Initial x must be either list or numpy.array, "
"all initial positions randomly generated"))
#
if hstfile is not None:
h_start = True
else:
h_start = False
if logfile is not None:
sto_hst = True
else:
sto_hst = False
# Set random number seed
rand = random.Random()
if rseed == {}:
rseed = time.time()
rand.seed(rseed)
#
if filename == '':
filename = 'ALPSO.out'
ofname = ''
sfname = ''
fntmp = filename.split('.')
if len(fntmp) == 1:
ofname += fntmp[0] + '_print.out'
sfname += fntmp[0] + '_summary.out'
else:
if '/' not in fntmp[-1] and '\\' not in fntmp[-1]:
ofname += filename[:filename.rfind('.')] + '_print.' + fntmp[-1]
sfname += filename[:filename.rfind('.')] + '_summary.' + fntmp[-1]
else:
ofname += filename + '_print.out'
sfname += filename + '_summary.out'
header = ''
header += ' ' * 37 + '======================\n'
header += ' ' * 39 + ' ALPSO 1.1 (Bulk)\n'
header += ' ' * 37 + '======================\n\n'
header += 'Parameters:\n'
header += '-' * 97 + '\n'
if maxInnIter != minInnIter:
diI = 1
else:
diI = 0
if x0 != []:
if len(x0.shape) == 1:
nxi = 1
else:
nxi = x0.shape[0]
else:
nxi = 0
header += 'Swarmsize :%9d' % swarmsize + ' MaxOuterIters :%9d' % maxOutIter + ' Seed:%26.8f\n' % rseed
header += 'Cognitive Parameter :%9.3f' % c1 + ' MaxInnerIters :%9d' % maxInnIter + ' Scaling :%11d\n' % scale
header += 'Social Parameter :%9.3f' % c2 + ' MinInnerIters :%9d' % minInnIter + ' Stopping Criteria :%11d\n' % stopCriteria
header += 'Initial Weight :%9.3f' % w1 + ' DynInnerIters :%9d' % diI + ' Number of Failures :%11d\n' % ns
header += 'Final Weight :%9.3f' % w2 + ' StoppingIters :%9d' % stopIters + ' Number of Successes:%11d\n\n' % nf
header += 'Absolute Tolerance : %1.2e' % atol + ' Number Initial Pos:%9d' % nxi + ' Neighbourhood Model:%11s\n' % nhm
header += 'Relative Tolerance : %1.2e' % rtol + ' Initial Velocity :%9d' % vinit + ' Neighbourhood Size :%11d\n' % nhn
header += 'Inequality Tolerance: %1.2e' % itol + ' Maximum Velocity :%9d' % vmax + ' Selfless :%11d\n' % nhs
header += 'Equality Tolerance : %1.2e' % etol + ' Craziness Velocity: %1.2e' % vcrazy + ' Fileout :%11d\n' % fileout
header += 'Global Distance : %1.2e' % dtol + ' Initial Penalty :%9.2f' % r0 + ' File Name :%11s\n' % filename
header += '-' * 97 + '\n\n'
if (fileout == 1) or (fileout == 3):
if os.path.isfile(ofname):
os.remove(ofname)
ofile = open(ofname, 'w')
ofile.write(header)
if (fileout == 2) or (fileout == 3):
if os.path.isfile(sfname):
os.remove(sfname)
sfile = open(sfname, 'w')
sfile.write(header)
#
dt = 1.0
vlimit = vmax
vmax = numpy.ones(dimensions, float) * vmax
if scale == 1:
space_centre = numpy.zeros(dimensions, float)
space_halflen = numpy.zeros(dimensions, float)
for j in range(dimensions):
space_centre[j] = (xmin[j] + xmax[j]) / 2.0
space_halflen[j] = ((xmax[j] - xmin[j]) / 2.0)
xmin = -numpy.ones(dimensions, float)
xmax = numpy.ones(dimensions, float)
else:
for j in range(dimensions):
vmax[j] = ((xmax[j] - xmin[j]) / 2.0) * vlimit
# Initialize the positions and velocities for entire population
x_k = numpy.zeros((swarmsize, dimensions), float)
v_k = numpy.zeros((swarmsize, dimensions), float)
discrete_i = []
for i in range(swarmsize):
for j in range(dimensions):
x_k[i, j] = xmin[j] + rand.random() * (xmax[j] - xmin[j])
if xtype[j] == 1:
discrete_i.append(j)
v_k[i, j] = (xmin[j] + rand.random() * (xmax[j] - xmin[j])) / dt
if x0 != []:
if len(x0.shape) == 1:
if scale == 1:
x_k[0, :] = (x0[:] - space_centre) / space_halflen
else:
x_k[0, :] = x0[:]
else:
if x0.shape[0] > swarmsize:
pyOptSparseWarning('%d initial positions specified for %d particles, last %d positions ignored'
% (x0.shape[0], swarmsize, x0.shape[0] - swarmsize))
x0 = x0[0:swarmsize, :]
for i in range(x0.shape[0]):
if scale == 1:
x_k[i, :] = (x0[i, :] - space_centre) / space_halflen
else:
x_k[i, :] = x0[i, :]
# Initialize Augmented Lagrange
f = numpy.zeros(swarmsize, float)
L = numpy.zeros(swarmsize, float)
g = numpy.zeros([swarmsize, constraints], float)
g_old = numpy.zeros([swarmsize, constraints], float)
rp = numpy.ones(constraints, float) * r0
lambda_val = numpy.zeros(constraints, float)
lambda_old = numpy.zeros(constraints, float)
tau = numpy.zeros([swarmsize, constraints], float)
tau_new = numpy.zeros(constraints, float)
tau_old = numpy.zeros(constraints, float)
nfevals = 0
if h_start:
[vals, hist_end] = hstfile.read([], ident=['obj', 'con'])
f = vals['obj'][0]
g = vals['con'][0].reshape(g.shape)
else:
# Evaluate Objective Function
if scale == 1:
xtmp = (x_k * space_halflen) + space_centre
else:
xtmp = x_k
for m in discrete_i:
            xtmp[:, m] = numpy.floor(xtmp[:, m] + .5)  # math.floor rejects array slices
f, g = objfunc(xtmp)
nfevals += swarmsize
for i in range(swarmsize):
# Augmented Lagrangian Value
L[i] = f[i]
if constraints > 0:
# Equality Constraints
for l in range(neqcons):
tau[i, l] = g[i, l]
# Inequality Constraints
for l in range(neqcons, constraints):
if rp[l] != 0:
if g[i, l] > -lambda_val[l] / (2 * rp[l]):
tau[i, l] = g[i, l]
else:
tau[i, l] = -lambda_val[l] / (2 * rp[l])
else:
tau[i, l] = g[i, l]
#
for l in range(constraints):
L[i] += lambda_val[l] * tau[i, l] + rp[l] * tau[i, l] ** 2
# Initialize Particles Best
best_x = numpy.zeros((swarmsize, dimensions))
best_L = numpy.zeros(swarmsize, float)
best_f = numpy.zeros(swarmsize, float)
best_g = numpy.zeros([swarmsize, constraints], float)
for i in range(swarmsize):
for j in range(dimensions):
best_x[i, j] = x_k[i, j]
best_L[i] = L[i]
best_f[i] = f[i]
for l in range(constraints):
best_g[i, l] = g[i, l]
# Initialize Swarm Best
swarm_i = L.argmin()
swarm_i_old = 0
swarm_x = numpy.zeros(dimensions, float)
for j in range(dimensions):
swarm_x[j] = x_k[swarm_i, j]
swarm_L = L[swarm_i]
swarm_L_old = L[0]
swarm_f = f[swarm_i]
swarm_f_old = f[0]
swarm_g = numpy.zeros(constraints, float)
swarm_g_old = numpy.zeros(constraints, float)
for l in range(constraints):
swarm_g[l] = g[swarm_i, l]
swarm_g_old[l] = g[0, l]
# Initialize Neighbourhood
if (nhm == 'dlring') or (nhm == 'slring') or (nhm == 'wheel') or (nhm == 'spatial') or (nhm == 'sfrac'):
nhps = []
nhbest_L = numpy.ones(swarmsize) * inf
nhbest_f = numpy.zeros(swarmsize)
nhbest_x = numpy.zeros((swarmsize, dimensions))
nhbest_i = numpy.zeros(swarmsize)
if nhm == 'dlring':
for i in range(swarmsize):
nhps.append([])
if nhs == 0:
nhps[i].append(i)
                for nb in range(1, (nhn // 2) + 1):  # integer division for Python 3
if i + nb >= swarmsize:
nhps[i].append(-1 + nb)
else:
nhps[i].append(i + nb)
if i - nb < 0:
nhps[i].append(swarmsize + i - nb)
else:
nhps[i].append(i - nb)
elif nhm == 'slring':
for i in range(swarmsize):
nhps.append([])
if nhs == 0:
nhps[i].append(i)
                for nb in range(1, (nhn // 2) + 1):  # integer division for Python 3
if i + nb >= swarmsize:
nhps[i].append(-1 + nb)
else:
nhps[i].append(i + nb)
if i - (nb * 2) < 0:
nhps[i].append(swarmsize + i - (nb * 2))
else:
nhps[i].append(i - (nb * 2))
elif nhm == 'wheel':
nhps.append([])
nhps[0].append(0)
for i in range(1, swarmsize):
nhps.append([])
nhps[i].append(i)
nhps[i].append(0)
nhps[0].append(i)
elif nhm == 'spatial':
pdist = numpy.ones((swarmsize, swarmsize)) * inf
for i in range(swarmsize):
for i2 in range(i + 1, swarmsize):
pdist[i, i2] = numpy.linalg.norm(x_k[i2, :] - x_k[i, :])
for i2 in range(i):
pdist[i, i2] = pdist[i2, i]
for i in range(swarmsize):
nhps.append([])
for nb in range(nhn):
nhps[i].append(pdist[i, :].argmin())
pdist[i, nhps[i][nb]] = inf
if nhs == 0:
nhps[i].append(i)
elif nhm == 'sfrac':
pdist = numpy.zeros((swarmsize, swarmsize))
d_max = numpy.zeros(swarmsize)
frac = 0.6
for i in range(swarmsize):
for i2 in range(i + 1, swarmsize):
pdist[i, i2] = numpy.linalg.norm(x_k[i2, :] - x_k[i, :])
for i2 in range(i):
pdist[i, i2] = pdist[i2, i]
for i in range(swarmsize):
nhps.append([])
d_max[i] = pdist[i, :].max()
for i2 in range(swarmsize):
if i == i2:
if nhs == 1:
pass
else:
nhps[i].append(i)
else:
if pdist[i, i2] / d_max[i] < frac:
nhps[i].append(i2)
        # Initialize Neighbourhood Best
for i in range(swarmsize):
for nbp in nhps[i]:
if L[nbp] < nhbest_L[i]:
nhbest_L[i] = L[nbp]
nhbest_f[i] = f[nbp]
nhbest_x[i, :] = x_k[nbp, :]
nhbest_i[i] = nbp
# Initialize stopping criteria distances
global_dist = 0
for i in range(swarmsize):
dist = 0
for j in range(dimensions):
dist += (x_k[i, j] - swarm_x[j]) ** 2
global_dist += dist ** 0.5
global_distance_reference = global_dist / swarmsize # relative extent of the swarm
global_distance = numpy.zeros(stopIters, float)
global_L = numpy.zeros(stopIters, float)
for k in range(stopIters):
global_distance[k] = global_distance_reference
global_L[k] = swarm_L
# Store History
if sto_hst:
logfile.write(rseed, 'seed')
if scale == 1:
x_uns = numpy.zeros(x_k.shape)
for i in range(swarmsize):
x_uns[i, :] = (x_k[i, :] * space_halflen) + space_centre
else:
x_uns = x_k
if discrete_i:
for i in range(swarmsize):
for m in discrete_i:
x_uns[i, m] = floor(x_uns[i, m] + 0.5)
logfile.write(x_uns, 'x')
logfile.write(f, 'obj')
logfile.write(g, 'con')
logfile.write(swarm_x, 'gbest_x')
logfile.write(swarm_f, 'gbest_f')
logfile.write(swarm_g, 'gbest_g')
# Output to Summary File
if (fileout == 2) or (fileout == 3):
stext = ''
stext += 'Global Best Particle:\n'
stext += '-' * 97 + '\n'
stext += ' Major Minor nFCon Violation(L2) Objective Lagrangian Rel Lagrangian Global Dist\n'
stext += '-' * 97 + '\n'
sfile.write(stext)
sfile.flush()
# Outer optimization loop
k_out = 0
stop_main_flag = 0
no_successes = 0
no_failures = 0
rho = 1.0
vcr = 0.0
while (k_out < maxOutIter) and (stop_main_flag == 0):
k_out += 1
# Update g_old Major Iteration
for i in range(swarmsize):
g_old[i, :] = g[i, :]
# Inner optimization loop - core ALPSO algorithm applied to the lagrangian function
k_inn = 0
stop_inner = 0
while (k_inn < maxInnIter) and (stop_inner == 0):
k_inn += 1
# calculating new search radius for the best particle ("Guaranteed Convergence" method)
if (swarm_i == swarm_i_old) and (swarm_L >= swarm_L_old):
no_failures += 1
no_successes = 0
elif (swarm_i == swarm_i_old) and (swarm_L < swarm_L_old):
no_successes += 1
no_failures = 0
else:
no_successes = 0
no_failures = 0
if no_successes > ns:
rho *= 2.0
no_successes = 0
elif no_failures > nf:
rho *= 0.5
no_failures = 0
if rho < 10e-5:
rho = 10e-5
elif rho > 1.0:
rho = 1.0
# memorization for next outer iteration
if k_inn == 1:
swarm_i_old = swarm_i
swarm_L_old = swarm_L
swarm_f_old = swarm_f
swarm_g_old[:] = swarm_g[:]
# stopping criteria distances
global_dist = 0
for i in range(swarmsize):
dist = 0
for j in range(dimensions):
dist += (x_k[i, j] - swarm_x[j]) ** 2
global_dist += dist ** 0.5
global_distance[0] = global_dist / swarmsize # relative extent of the swarm
# Update inertia weight
w = w2 + ((w2 - w1) / global_distance_reference) * global_distance[1]
if w > w1:
w = w1
elif w < w2:
w = w2
# Swarm Update
for i in range(swarmsize):
# Update velocity vector
if (nhm == 'dlring') or (nhm == 'slring') or (nhm == 'wheel') or (nhm == 'spatial') or (nhm == 'sfrac'):
lbest_x = nhbest_x[i, :]
else:
lbest_x = swarm_x[:]
for j in range(dimensions):
if i == swarm_i:
rr = rand.random()
v_k[i, j] = w * v_k[i, j] + -x_k[i, j] + swarm_x[j] + rho * (1.0 - 2.0 * rr)
else:
r1 = rand.random()
r2 = rand.random()
rc = rand.random()
v_k[i, j] = w * v_k[i, j] + c1 * r1 * (best_x[i, j] - x_k[i, j]) / dt + c2 * r2 * (
lbest_x[j] - x_k[i, j]) / dt + vcr * (1.0 - 2.0 * rc)
# Check for velocity vector out of range
if v_k[i, j] > vmax[j]:
v_k[i, j] = vmax[j]
elif v_k[i, j] < -vmax[j]:
v_k[i, j] = -vmax[j]
# positions update
x_k[i, j] += v_k[i, j] * dt
# Check for positions out of range
if x_k[i, j] > xmax[j]:
x_k[i, j] = xmax[j]
elif x_k[i, j] < xmin[j]:
x_k[i, j] = xmin[j]
# Augmented Lagrange
if h_start:
[vals, hist_end] = hstfile.read([], ident=['obj', 'con'])
if not hist_end:
f = vals['obj'][0]
g = vals['con'][0].reshape(g.shape)
else:
h_start = False
hstfile.close()
if not h_start:
# Evaluate Objective Function
if scale == 1:
xtmp = (x_k * space_halflen) + space_centre
else:
xtmp = x_k
for m in discrete_i:
                    xtmp[:, m] = numpy.floor(xtmp[:, m] + .5)  # math.floor rejects array slices
f, g = objfunc(xtmp)
nfevals += swarmsize
# Store History
if sto_hst:
if scale == 1:
x_uns = numpy.zeros(x_k.shape)
for i in range(swarmsize):
x_uns[i, :] = (x_k[i, :] * space_halflen) + space_centre
else:
x_uns = x_k
if discrete_i:
for i in range(swarmsize):
for m in discrete_i:
x_uns[i, m] = floor(x_uns[i, m] + 0.5)
logfile.write(x_uns, 'x')
logfile.write(f, 'obj')
logfile.write(g, 'con')
for i in range(swarmsize):
# Lagrangian Value
L[i] = f[i]
if constraints > 0:
# Equality Constraints
for l in range(neqcons):
tau[i, l] = g[i, l]
# Inequality Constraints
for l in range(neqcons, constraints):
if rp[l] != 0:
if g[i, l] > -lambda_val[l] / (2 * rp[l]):
tau[i, l] = g[i, l]
else:
tau[i, l] = -lambda_val[l] / (2 * rp[l])
else:
tau[i, l] = g[i, l]
#
for l in range(constraints):
L[i] += lambda_val[l] * tau[i, l] + rp[l] * tau[i, l] ** 2
# If there is no new better solution for gbest keep the old best position
# if (L[swarm_i] > swarm_L):
# x_k[swarm_i,:] = swarm_x[:]
# f[swarm_i] = swarm_f
# g[swarm_i,:] = swarm_g[:]
# L[swarm_i] = swarm_L
# Particle Best Update
for i in range(swarmsize):
if L[i] < best_L[i]:
best_L[i] = L[i]
best_f[i] = f[i]
best_g[i, :] = g[i, :]
best_x[i, :] = x_k[i, :]
# Swarm Best Update
for i in range(swarmsize):
if L[i] < swarm_L:
# update of the best particle and best position
swarm_i = i
swarm_x[:] = x_k[i, :]
# update of the best objective function value found
swarm_f = f[i]
# update of the best constraints values found
swarm_g[:] = g[i, :]
# update of the swarm best L
swarm_L = L[i]
# Spatial Neighbourhood Update
if (nhm == 'spatial') or (nhm == 'sfrac'):
for i in range(swarmsize):
for i2 in range(i + 1, swarmsize):
pdist[i, i2] = numpy.linalg.norm(x_k[i2, :] - x_k[i, :])
for i2 in range(i):
pdist[i, i2] = pdist[i2, i]
if nhm == 'spatial':
for i in range(swarmsize):
nhps[i] = []
for nb in range(nhn):
nhps[i].append(pdist[i, :].argmin())
pdist[i, nhps[i][nb]] = inf
if nhs == 0:
nhps[i].append(i)
else:
frac = ((3 * k_out) + 0.6 * maxOutIter) / maxOutIter
if frac >= 1.0:
nhm = 'gbest'
else:
for i in range(swarmsize):
nhps[i] = []
d_max[i] = pdist[i, :].max()
for i2 in range(swarmsize):
if i == i2:
if nhs == 1:
pass
else:
nhps[i].append(i)
else:
if pdist[i, i2] / d_max[i] < frac:
nhps[i].append(i2)
# Neighbourhood Best Update
if (nhm == 'dlring') or (nhm == 'slring') or (nhm == 'wheel') or (nhm == 'spatial') or (nhm == 'sfrac'):
for i in range(swarmsize):
for nbp in nhps[i]:
if L[nbp] < nhbest_L[i]:
nhbest_L[i] = L[nbp]
nhbest_f[i] = f[nbp]
nhbest_x[i, :] = x_k[nbp, :]
nhbest_i[i] = nbp
# Print Inner
if prtInnIter != 0 and numpy.mod(k_inn, prtInnIter) == 0:
# output to screen
print('Outer Iteration: %d [%d. Inner Iteration]' % (k_out, k_inn))
if (fileout == 1) or (fileout == 3):
# output to filename
pass
# Inner Loop Convergence
if k_inn >= minInnIter:
if swarm_L < swarm_L_old:
stop_inner = 1
# Store History
if sto_hst:
logfile.write(swarm_x, 'gbest_x')
logfile.write(swarm_f, 'gbest_f')
logfile.write(swarm_g, 'gbest_g')
# Print Outer
if prtOutIter != 0 and numpy.mod(k_out, prtOutIter) == 0:
# Output to screen
print("=" * 80 + "\n")
print("NUMBER OF ITERATIONS: %d\n" % k_out)
print("NUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" % nfevals)
print("OBJECTIVE FUNCTION VALUE:")
print("\tF = %.16g\n" % (float(swarm_f)))
if constraints > 0:
# Equality Constraints
print("EQUALITY CONSTRAINTS VALUES:")
for l in range(neqcons):
print("\tH(%d) = %g" % (l, swarm_g[l]))
# Inequality Constraints
print("\nINEQUALITY CONSTRAINTS VALUES:")
for l in range(neqcons, constraints):
print("\tG(%d) = %g" % (l, swarm_g[l]))
print("\nLAGRANGIAN MULTIPLIERS VALUES:")
for l in range(constraints):
print("\tL(%d) = %g" % (l, lambda_val[l]))
print("\nBEST POSITION:")
if scale == 1:
xtmp = (swarm_x[:] * space_halflen) + space_centre
else:
xtmp = swarm_x[:]
for m in discrete_i:
xtmp[m] = floor(xtmp[m] + 0.5)
text = ''
for j in range(dimensions):
text += ("\tP(%d) = %.16g\t" % (j, xtmp[j]))
if numpy.mod(j + 1, 3) == 0:
text += "\n"
print(text)
print("=" * 80 + "\n")
if (fileout == 1) or (fileout == 3):
# Output to Print File
ofile.write("\n" + "=" * 80 + "\n")
ofile.write("\nNUMBER OF ITERATIONS: %d\n" % k_out)
ofile.write("\nNUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" % nfevals)
ofile.write("\nOBJECTIVE FUNCTION VALUE:\n")
ofile.write("\tF = %.16g\n" % (float(swarm_f)))
if constraints > 0:
# Equality Constraints
ofile.write("\nEQUALITY CONSTRAINTS VALUES:\n")
for l in range(neqcons):
ofile.write("\tH(%d) = %.16g\n" % (l, swarm_g[l]))
# Inequality Constraints
ofile.write("\nINEQUALITY CONSTRAINTS VALUES:\n")
for l in range(neqcons, constraints):
ofile.write("\tG(%d) = %.16g\n" % (l, swarm_g[l]))
ofile.write("\nLAGRANGIAN MULTIPLIERS VALUES:\n")
for l in range(constraints):
ofile.write("\tL(%d) = %.16g\n" % (l, lambda_val[l]))
ofile.write("\nPENALTY FACTOR:\n")
for l in range(constraints):
ofile.write("\trp(%d) = %.16g\n" % (l, rp[l]))
ofile.write("\nBEST POSITION:\n")
if scale == 1:
xtmp = (swarm_x[:] * space_halflen) + space_centre
else:
xtmp = swarm_x[:]
for m in discrete_i:
xtmp[m] = floor(xtmp[m] + 0.5)
text = ''
for j in range(dimensions):
text += ("\tP(%d) = %.16g\t" % (j, xtmp[j]))
if numpy.mod(j + 1, 3) == 0:
text += "\n"
ofile.write(text)
ofile.write("\n" + "=" * 80 + "\n")
ofile.flush()
# Store History
if sto_hst and (minInnIter != maxInnIter):
logfile.write(k_inn, 'ninner')
# Test Constraint convergence
stop_con_num = 0
infeas_con = []
if constraints == 0:
stop_constraints_flag = 1
else:
for l in range(neqcons):
if abs(swarm_g[l]) <= etol:
stop_con_num += 1
else:
infeas_con.append(l)
for l in range(neqcons, constraints):
if swarm_g[l] < itol:
stop_con_num += 1
else:
infeas_con.append(l)
if stop_con_num == constraints:
stop_constraints_flag = 1
else:
stop_constraints_flag = 0
# # Test Lagrange multiplier convergence
# stop_lambda_flag = 0
# if (constraints == 0):
# stop_lambda_flag = 1
# else:
# for l in range(constraints):
# if (abs(lambda_val[l]-lambda_old[l]) <= ltol):
# stop_lambda_flag += 1
#
#
# if (stop_lambda_flag==constraints):
# stop_lambda_flag = 1
# else:
# stop_lambda_flag = 0
#
#
#
# Test Position and Function convergence
stop_criteria_flag = 0
if stopCriteria == 1:
# setting up the stopping criteria based on distance and tolerance
for k in range(stopIters - 1, 0, -1):
global_distance[k] = global_distance[k - 1]
global_L[k] = global_L[k - 1]
#
global_dist = 0
for i in range(swarmsize):
dist = 0
for j in range(dimensions):
dist += (x_k[i, j] - swarm_x[j]) ** 2
global_dist += dist ** 0.5
global_distance[0] = global_dist / swarmsize # relative extent of the swarm
#
global_L[0] = swarm_L
#
if (abs(global_distance[0] - global_distance[stopIters - 1]) <=
dtol * abs(global_distance[stopIters - 1]) and
abs(global_L[0] - global_L[stopIters - 1]) <= rtol * abs(global_L[stopIters - 1]) or
abs(global_L[0] - global_L[stopIters - 1]) <= atol):
stop_criteria_flag = 1
else:
stop_criteria_flag = 0
# Test Convergence
if stop_constraints_flag == 1 and stop_criteria_flag == 1: # and stop_lambda_flag == 1
stop_main_flag = 1
else:
stop_main_flag = 0
# Output to Summary File
if (fileout == 2) or (fileout == 3):
cvss = 0.0
for l in infeas_con:
cvss += swarm_g[l] ** 2
cvL2 = cvss ** 0.5
if stopCriteria == 1:
relL = abs(global_L[0] - global_L[stopIters - 1]) / abs(global_L[stopIters - 1])
stext = '%9d%8d%8d%15.4e%15f%13.4e%16.4e%14.4e\n' % (
k_out, k_inn, stop_con_num, cvL2, swarm_f, swarm_L, relL, global_distance[0])
else:
stext = '%9d%8d%8d%15.4e%15f%13.4e%16s%14s\n' % (
k_out, k_inn, stop_con_num, cvL2, swarm_f, swarm_L, 'NA', 'NA')
sfile.write(stext)
sfile.flush()
# Update Augmented Lagrangian Terms
if stop_main_flag == 0:
if constraints > 0:
# Update new Tau
for l in range(neqcons):
tau_new[l] = swarm_g[l]
for l in range(neqcons, constraints):
if swarm_g[l] > -lambda_val[l] / (2 * rp[l]):
tau_new[l] = swarm_g[l]
else:
tau_new[l] = -lambda_val[l] / (2 * rp[l])
# Update Lagrange Multiplier
for l in range(constraints):
lambda_old[l] = lambda_val[l]
lambda_val[l] += 2 * rp[l] * tau_new[l]
if abs(lambda_val[l]) < eps:
lambda_val[l] = 0.0
# Update Penalty Factor
for l in range(neqcons):
if abs(swarm_g[l]) > abs(swarm_g_old[l]) and abs(swarm_g[l]) > etol:
rp[l] *= 2.0
elif abs(swarm_g[l]) <= etol:
rp[l] *= 0.5
for l in range(neqcons, constraints):
if swarm_g[l] > swarm_g_old[l] and swarm_g[l] > itol:
rp[l] *= 2.0
elif swarm_g[l] <= itol:
rp[l] *= 0.5
# Apply Lower Bounds on rp
for l in range(neqcons):
if rp[l] < 0.5 * (abs(lambda_val[l]) / etol) ** 0.5:
rp[l] = 0.5 * (abs(lambda_val[l]) / etol) ** 0.5
for l in range(neqcons, constraints):
if rp[l] < 0.5 * (abs(lambda_val[l]) / itol) ** 0.5:
rp[l] = 0.5 * (abs(lambda_val[l]) / itol) ** 0.5
for l in range(constraints):
if rp[l] < 1:
rp[l] = 1
for i in range(swarmsize):
if constraints > 0:
# Update Tau
for l in range(neqcons):
tau[i, l] = g[i, l]
for l in range(neqcons, constraints):
if g[i, l] > -lambda_val[l] / (2 * rp[l]):
tau[i, l] = g[i, l]
else:
tau[i, l] = -lambda_val[l] / (2 * rp[l])
# set craziness velocity for next inner loop run
vcr = (1 - k_out / maxOutIter) * vcrazy
# update swarm with new Lagrangian function for next inner run
for i in range(swarmsize):
L[i] = f[i]
if constraints > 0:
for l in range(constraints):
L[i] += lambda_val[l] * tau[i, l] + rp[l] * tau[i, l] ** 2
swarm_L = L[swarm_i]
swarm_L_old = swarm_f_old
if constraints > 0:
# Equality Constraints
for l in range(neqcons):
tau_old[l] = swarm_g_old[l]
# Inequality Constraints
for l in range(neqcons, constraints):
if rp[l] != 0:
if swarm_g_old[l] > -lambda_val[l] / (2 * rp[l]):
tau_old[l] = swarm_g_old[l]
else:
tau_old[l] = -lambda_val[l] / (2 * rp[l])
else:
tau_old[l] = swarm_g_old[l]
#
for l in range(constraints):
swarm_L_old += lambda_val[l] * tau_old[l] + rp[l] * tau_old[l] ** 2
# reset swarm memory for next inner run
for i in range(swarmsize):
best_L[i] = L[i]
best_f[i] = f[i]
best_g[i, :] = g[i, :]
best_x[i, :] = x_k[i, :]
# Print Results
if prtOutIter != 0:
# Output to screen
print("=" * 80 + "\n")
print("RANDOM SEED VALUE: %.8f\n" % rseed)
print("NUMBER OF ITERATIONS: %d\n" % k_out)
print("NUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" % nfevals)
print("OBJECTIVE FUNCTION VALUE:")
print("\tF = %.16g\n" % (float(swarm_f)))
if constraints > 0:
# Equality Constraints
print("EQUALITY CONSTRAINTS VALUES:")
for l in range(neqcons):
print("\tH(%d) = %g" % (l, swarm_g[l]))
# Inequality Constraints
print("\nINEQUALITY CONSTRAINTS VALUES:")
for l in range(neqcons, constraints):
print("\tG(%d) = %g" % (l, swarm_g[l]))
print("\nLAGRANGIAN MULTIPLIERS VALUES:")
for l in range(constraints):
print("\tL(%d) = %g" % (l, float(lambda_val[l])))
print("\nBEST POSITION:")
if scale == 1:
xtmp = (swarm_x[:] * space_halflen) + space_centre
else:
xtmp = swarm_x[:]
for m in discrete_i:
xtmp[m] = floor(xtmp[m] + 0.5)
text = ''
for j in range(dimensions):
text += ("\tP(%d) = %.16g\t" % (j, xtmp[j]))
if numpy.mod(j + 1, 3) == 0:
text += "\n"
print(text)
print("=" * 80 + "\n")
if (fileout == 1) or (fileout == 3):
ofile.close()
if (fileout == 2) or (fileout == 3):
# Output to Summary
sfile.write("\n\nSolution:")
sfile.write("\n" + "=" * 94 + "\n")
sfile.write("\nNUMBER OF ITERATIONS: %d\n" % k_out)
sfile.write("\nNUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" % nfevals)
sfile.write("\nOBJECTIVE FUNCTION VALUE:\n")
sfile.write("\tF = %.16g\n" % (float(swarm_f)))
if constraints > 0:
# Equality Constraints
sfile.write("\nEQUALITY CONSTRAINTS VALUES:\n")
for l in range(neqcons):
sfile.write("\tH(%d) = %.16g\n" % (l, swarm_g[l]))
# Inequality Constraints
sfile.write("\nINEQUALITY CONSTRAINTS VALUES:\n")
for l in range(neqcons, constraints):
sfile.write("\tG(%d) = %.16g\n" % (l, swarm_g[l]))
sfile.write("\nLAGRANGIAN MULTIPLIERS VALUES:\n")
for l in range(constraints):
sfile.write("\tL(%d) = %.16g\n" % (l, float(lambda_val[l])))
sfile.write("\nPENALTY FACTOR:\n")
for l in range(constraints):
sfile.write("\trp(%d) = %.16g\n" % (l, rp[l]))
sfile.write("\nBEST POSITION:\n")
if scale == 1:
xtmp = (swarm_x[:] * space_halflen) + space_centre
else:
xtmp = swarm_x[:]
for m in discrete_i:
xtmp[m] = floor(xtmp[m] + 0.5)
text = ''
for j in range(dimensions):
text += ("\tP(%d) = %.16g\t" % (j, xtmp[j]))
if numpy.mod(j + 1, 3) == 0:
text += "\n"
sfile.write(text)
sfile.write("\n" + "=" * 94 + "\n")
sfile.flush()
sfile.close()
# Results
if scale == 1:
opt_x = (swarm_x * space_halflen) + space_centre
else:
opt_x = swarm_x
for m in discrete_i:
opt_x[m] = int(floor(opt_x[m] + 0.5))
opt_f = swarm_f
opt_g = swarm_g
opt_lambda = lambda_val[:]
return opt_x, opt_f, opt_g, opt_lambda, nfevals, '%.8f' % rseed
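# Reader's sketch (not called anywhere in this module): the augmented
# Lagrangian assembled in the loops above for one particle,
# L_i = f_i + sum_l (lambda_l * tau_il + rp_l * tau_il**2).
def _augmented_lagrangian(f_i, tau_i, lambda_val, rp):
    return f_i + sum(lam * t + r * t ** 2
                     for lam, t, r in zip(lambda_val, tau_i, rp))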
| 35.097087 | 141 | 0.44089 |
1f78e842488c541d546cb31c34c453d496b829e8 | 3,823 | py | Python | test/functional/wallet_encryption.py | allforonebusiness/AllForOneBusiness | 63978033c60e2106adb7c6d9ba4bd3a4fc159ce6 | ["MIT"] | null | null | null | test/functional/wallet_encryption.py | allforonebusiness/AllForOneBusiness | 63978033c60e2106adb7c6d9ba4bd3a4fc159ce6 | ["MIT"] | null | null | null | test/functional/wallet_encryption.py | allforonebusiness/AllForOneBusiness | 63978033c60e2106adb7c6d9ba4bd3a4fc159ce6 | ["MIT"] | 4 | 2019-11-13T21:49:01.000Z | 2020-11-29T20:17:14.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import AllForOneBusinessTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(AllForOneBusinessTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wallet already unlocked
self.nodes[0].walletpassphrase(passphrase, 12000, True)
assert_raises_rpc_error(-17, "Wallet is already unlocked", self.nodes[0].walletpassphrase, passphrase, 100, True)
self.nodes[0].walletlock()
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
| 43.942529 | 138 | 0.706513 |
020f7a329738a2c26675e58a7ab6bf3992630a4c | 1,830 | py | Python | media/transcoder/list_job_templates.py | BaljitSingh919/Project360 | b8ec08f6598e6b4d6d190b63c6b64f268225bd2d | ["Apache-2.0"] | 5,938 | 2015-05-18T05:04:37.000Z | 2022-03-31T20:16:39.000Z | media/transcoder/list_job_templates.py | pranshusinghal/python-docs-samples | 3341286ec657d9010d2441de66f7e45c7f5cefdd | ["Apache-2.0"] | 4,730 | 2015-05-07T19:00:38.000Z | 2022-03-31T21:59:41.000Z | media/transcoder/list_job_templates.py | FFHixio/python-docs-samples | b39441b3ca0a7b27e9c141e9b43e78e729105573 | ["Apache-2.0"] | 6,734 | 2015-05-05T17:06:20.000Z | 2022-03-31T12:02:51.000Z | #!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Transcoder sample for listing job templates in a location.
Example usage:
python list_job_templates.py --project_id <project-id> --location <location>
"""
# [START transcoder_list_job_templates]
import argparse
from google.cloud.video.transcoder_v1.services.transcoder_service import (
TranscoderServiceClient,
)
def list_job_templates(project_id, location):
"""Lists all job templates in a location.
Args:
project_id: The GCP project ID.
location: The location of the templates."""
client = TranscoderServiceClient()
parent = f"projects/{project_id}/locations/{location}"
response = client.list_job_templates(parent=parent)
print("Job templates:")
for jobTemplate in response.job_templates:
        print(jobTemplate.name)
return response
# [END transcoder_list_job_templates]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--project_id", help="Your Cloud project ID.", required=True)
parser.add_argument(
"--location", help="The location of the templates.", required=True
)
args = parser.parse_args()
list_job_templates(args.project_id, args.location)
| 30.5 | 85 | 0.734973 |
5144253a93105389f895957baf1c408c708606a1 | 6,775 | py | Python | datadog_checks_dev/tests/tooling/test_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | ["BSD-3-Clause"] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | datadog_checks_dev/tests/tooling/test_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | ["BSD-3-Clause"] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | datadog_checks_dev/tests/tooling/test_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | ["BSD-3-Clause"] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from os.path import join
import mock
from datadog_checks.dev.tooling.config import copy_default_config
from datadog_checks.dev.tooling.utils import (
complete_set_root,
get_check_files,
get_version_string,
initialize_root,
is_logs_only,
parse_agent_req_file,
)
from ..common import not_windows_ci
def test_parse_agent_req_file():
contents = "datadog-active-directory==1.1.1; sys_platform == 'win32'\nthis is garbage"
catalog = parse_agent_req_file(contents)
assert len(catalog) == 1
assert catalog['datadog-active-directory'] == '1.1.1'
def test_get_version_string():
with mock.patch('datadog_checks.dev.tooling.utils.read_version_file') as read:
read.return_value = '__version__ = "2.0.0"'
assert get_version_string('foo_check') == '2.0.0'
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
def test_is_logs_only(get_root):
get_root.return_value = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..'))
assert is_logs_only('flink')
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_bad_path(set_root, get_root):
get_root.return_value = ''
# bad path in config results in cwd
config = copy_default_config()
config['core'] = '/path/does/not/exist'
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.getcwd())
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_good_path(set_root, get_root):
get_root.return_value = ''
# good path in config uses that
config = copy_default_config()
config['core'] = '~'
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.path.expanduser('~'))
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_env_var(set_root, get_root):
get_root.return_value = ''
ddev_env = '/tmp'
with mock.patch.dict(os.environ, {'DDEV_ROOT': ddev_env}):
config = copy_default_config()
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.path.expanduser(ddev_env))
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_no_args(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
config['core'] = '/tmp' # ensure we choose a dir that exists
load_config.return_value = config
args = []
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(config['core'])
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_here(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
load_config.return_value = config
args = ['-x']
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(os.getcwd())
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_extras(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
config['extras'] = '/tmp' # ensure we choose a dir that exists
load_config.return_value = config
args = ['-e']
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(config['extras'])
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
def test_get_check_files(get_root):
get_root.return_value = ''
mock_dir_map = {
'': [
(
'dns_check',
['datadog_checks', 'datadog_dns_check.egg-info', 'tests', '.junit', 'assets'],
[
'CHANGELOG.md',
'MANIFEST.in',
'setup.py',
'requirements-dev.txt',
'tox.ini',
'manifest.json',
'metadata.csv',
],
)
],
'datadog_checks': [
(join('dns_check', 'datadog_checks'), ['dns_check'], ['__init__.py']),
(
join('dns_check', 'datadog_checks', 'dns_check'),
['data'],
['__init__.py', '__about__.py', 'dns_check.py'],
),
(join('dns_check', 'datadog_checks', 'dns_check', 'data'), [], ['conf.yaml.example']),
],
'.tox': [(join('dns_check', '.tox'), ['py37', '.tmp', 'py27'], [])],
'datadog_dns_check.egg-info': [
(join('dns_check', 'datadog_dns_check.egg-info'), [], ['PKG-INFO', 'SOURCES.txt'])
],
'tests': [(join('dns_check', 'tests'), [], ['test_dns_check.py', '__init__.py', 'common.py'])],
'.junit': [(join('dns_check', '.junit'), [], ['test-e2e-py37.xml', 'test-e2e-py27.xml'])],
'assets': [(join('dns_check', 'assets'), [], ['service_checks.json'])],
}
default_py_files = [
join('dns_check', 'datadog_checks', '__init__.py'),
join('dns_check', 'datadog_checks', 'dns_check', '__init__.py'),
join('dns_check', 'datadog_checks', 'dns_check', '__about__.py'),
join('dns_check', 'datadog_checks', 'dns_check', 'dns_check.py'),
join('dns_check', 'tests', 'test_dns_check.py'),
join('dns_check', 'tests', '__init__.py'),
join('dns_check', 'tests', 'common.py'),
]
with mock.patch('os.walk') as mockwalk:
mockwalk.side_effect = lambda base: mock_dir_map[os.path.basename(base)]
files = get_check_files('dns_check')
assert list(files) == default_py_files
files = get_check_files('dns_check', file_suffix='.json', include_dirs=['assets'])
assert list(files) == [join('dns_check', 'assets', 'service_checks.json')]
files = get_check_files('dns_check', file_suffix='.json', include_dirs=['', 'assets'])
assert list(files) == [join('dns_check', 'manifest.json'), join('dns_check', 'assets', 'service_checks.json')]
| 35.846561 | 118 | 0.648266 |
4e810df48f1632835d3adb11ee9fa296d74536c5 | 6,234 | py | Python | setup.py | thibaultfalque/pysat | ed60b2571101bcfd05cd26617bf66c32540cf241 | ["MIT"] | null | null | null | setup.py | thibaultfalque/pysat | ed60b2571101bcfd05cd26617bf66c32540cf241 | ["MIT"] | null | null | null | setup.py | thibaultfalque/pysat | ed60b2571101bcfd05cd26617bf66c32540cf241 | ["MIT"] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## setup.py
##
## Created on: Jan 23, 2018
## Author: Alexey Ignatiev
## E-mail: alexey.ignatiev@monash.edu
##
#
#==============================================================================
import os
import os.path
import contextlib
import glob
try:
from setuptools import setup, Extension
HAVE_SETUPTOOLS = True
except ImportError:
from distutils.core import setup, Extension
HAVE_SETUPTOOLS = False
import distutils.command.build
import distutils.command.build_ext
import distutils.command.install
import inspect, os, sys
sys.path.insert(0, os.path.join(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])), 'solvers/'))
import platform
import prepare
from pysat import __version__
#
#==============================================================================
@contextlib.contextmanager
def chdir(new_dir):
old_dir = os.getcwd()
try:
os.chdir(new_dir)
yield
finally:
os.chdir(old_dir)
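# Usage sketch (illustrative, not invoked here): run a step inside another
# directory and always restore the previous cwd, even if the step raises:
#
#     with chdir('solvers'):
#         compile_solver()   # hypothetical helper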
#
#==============================================================================
ROOT = os.path.abspath(os.path.dirname(__file__))
LONG_DESCRIPTION = """
A Python library providing a simple interface to a number of state-of-art
Boolean satisfiability (SAT) solvers and a few types of cardinality and
pseudo-Boolean encodings. The purpose of PySAT is to enable researchers
working on SAT and its applications and generalizations to easily prototype
with SAT oracles in Python while exploiting incrementally the power of the
original low-level implementations of modern SAT solvers.
With PySAT it should be easy for you to implement a MaxSAT solver, an
MUS/MCS extractor/enumerator, or any tool solving an application problem
with the (potentially multiple) use of a SAT oracle.
Details can be found at `https://pysathq.github.io <https://pysathq.github.io>`__.
"""
# solvers to install
#==============================================================================
to_install = ['cadical', 'glucose30', 'glucose41', 'lingeling', 'maplechrono',
'maplecm', 'maplesat', 'minicard', 'minisat22', 'minisatgh']
# example scripts to install as standalone executables
#==============================================================================
scripts = ['fm', 'genhard', 'lbx', 'lsu', 'mcsls', 'models', 'musx', 'rc2']
# we need to redefine the build command to
# be able to download and compile solvers
#==============================================================================
class build(distutils.command.build.build):
"""
Our custom builder class.
"""
def run(self):
"""
Download, patch and compile SAT solvers before building.
"""
# download and compile solvers
if platform.system() != 'Windows':
prepare.do(to_install)
# now, do standard build
distutils.command.build.build.run(self)
# same with build_ext
#==============================================================================
class build_ext(distutils.command.build_ext.build_ext):
"""
Our custom builder class.
"""
def run(self):
"""
Download, patch and compile SAT solvers before building.
"""
# download and compile solvers
if platform.system() != 'Windows':
prepare.do(to_install)
# now, do standard build
distutils.command.build_ext.build_ext.run(self)
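# With the hooks above, a source build downloads and patches the solvers
# before compiling. Typical invocations (generic commands, nothing
# project-specific is assumed):
#
#     python setup.py build
#     pip install .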
# compilation flags for C extensions
#==============================================================================
compile_flags, cpplib = ['-std=c++11', '-Wall', '-Wno-deprecated'], ['stdc++']
if platform.system() == 'Darwin':
compile_flags += ['--stdlib=libc++']
cpplib = ['c++']
elif platform.system() == 'Windows':
compile_flags = ['-DNBUILD', '-DNLGLYALSAT' , '/DINCREMENTAL', '-DNLGLOG',
'-DNDEBUG', '-DNCHKSOL', '-DNLGLFILES', '-DNLGLDEMA', '-I./win']
cpplib = []
# C extensions: pycard and pysolvers
#==============================================================================
pycard_ext = Extension('pycard',
sources=['cardenc/pycard.cc'],
extra_compile_args=compile_flags,
include_dirs=['cardenc'] ,
language='c++',
libraries=cpplib,
library_dirs=[]
)
pysolvers_sources = ['solvers/pysolvers.cc']
if platform.system() == 'Windows':
prepare.do(to_install)
with chdir('solvers'):
for solver in to_install:
with chdir(solver):
for filename in glob.glob('*.c*'):
pysolvers_sources += ['solvers/%s/%s' % (solver, filename)]
for filename in glob.glob('*/*.c*'):
pysolvers_sources += ['solvers/%s/%s' % (solver, filename)]
libraries = []
library_dirs = []
else:
libraries = to_install + cpplib
library_dirs = list(map(lambda x: os.path.join('solvers', x), to_install))
pysolvers_ext = Extension('pysolvers',
sources=pysolvers_sources,
extra_compile_args=compile_flags + \
list(map(lambda x: '-DWITH_{0}'.format(x.upper()), to_install)),
include_dirs=['solvers'],
language='c++',
libraries=libraries,
library_dirs=library_dirs
)
# finally, calling standard setuptools.setup() (or distutils.core.setup())
#==============================================================================
setup(name='python-sat',
packages=['pysat', 'pysat.examples'],
package_dir={'pysat.examples': 'examples'},
version=__version__,
description='A Python library for prototyping with SAT oracles',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/x-rst; charset=UTF-8',
license='MIT',
author='Alexey Ignatiev, Joao Marques-Silva, Antonio Morgado',
author_email='alexey.ignatiev@monash.edu, joao.marques-silva@univ-toulouse.fr, ajrmorgado@gmail.com',
url='https://github.com/pysathq/pysat',
ext_modules=[pycard_ext, pysolvers_ext],
scripts=['examples/{0}.py'.format(s) for s in scripts],
cmdclass={'build': build, 'build_ext': build_ext},
install_requires=['six'],
extras_require = {
'aiger': ['py-aiger-cnf>=2.0.0'],
'pblib': ['pypblib>=0.0.3']
}
)
| 32.638743 | 138 | 0.580687 |
4e0889224ee37f9489866eb0040acca6d2e77f00 | 62,160 | py | Python | application_test.py | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | ["MIT"] | null | null | null | application_test.py | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | ["MIT"] | null | null | null | application_test.py | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | ["MIT"] | null | null | null |
#Created by eNAS
#Import GUI interface Libraries
from tkinter import *
import tkinter as tk
from tkinter import ttk
import pickle
import tkinter.messagebox
import tksheet
from tkinter.ttk import *
from tkinter.filedialog import askopenfile
import time
from dissim import *
import os, sys
from tkinter import colorchooser
from tkinter import font
from tkinter import simpledialog
import win32print
import win32api
import csv
import io
import pandas as pd
from tkinter import filedialog, Label, Button, Entry, StringVar
from tkinter.filedialog import askopenfilename
# from PIL import ImageTk,Image
# from tkinter import filedialog
# from tkinter.ttk import
#############################Import other .py files ############################################
from deepcopy import *
# from Adult_implementation import *
# from COMPAS_implementation import *
# from German_implementation import *
from implementation_functions import *
# from german_imp import *
import skfuzzy as fuzz
from prince import FAMD #Factor analysis of mixed data
'--------------------------------------------------------------------------------'
import six
import sys
sys.modules['sklearn.externals.six'] = six
import mlrose
import numpy as np
from aif360.metrics import BinaryLabelDatasetMetric
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
from table2 import *
#Creating the application main root
root = Tk()
root.title('Fairness System')
root.resizable(True, True)
root.geometry("1000x800")
my_menu = Menu(root)
root.config(menu=my_menu)
# Use this command to hide the tabs, and then show them
def our_command():
pass
def hide():
    my_notebook.hide(frame2)
def show():
my_notebook.add(frame2)
'--------------------------------------------------------------------------------------------------------'
#Start creating the tabs
my_notebook = ttk.Notebook(root)
my_notebook.pack(pady = 15)
##################################
#Start designing the frames
frame0 = Frame(my_notebook, width = 2000, height = 2000)
frame1 = Frame(my_notebook, width = 2000, height = 2000)
frame2 = Frame(my_notebook, width = 2000, height = 2000)
frame3 = Frame(my_notebook, width = 2000, height = 2000)
frame5 = Frame(my_notebook, width = 2000, height = 2000)
frame4 = Frame(my_notebook, width = 2000, height = 2000)
frame6 = Frame(my_notebook, width = 2000, height = 2000)
frame7 = Frame(my_notebook, width = 2000, height = 2000)
frame1.pack(fill = "both", expand = 1)
frame2.pack(fill = "both", expand = 1)
my_notebook.add(frame0, text = "Welcome")
my_notebook.add(frame1, text = "Dataset Upload")
my_notebook.add(frame2, text = "Dataset Select")
my_notebook.add(frame3, text = "Explore Data")
my_notebook.add(frame5, text = "Classifications")
my_notebook.add(frame4, text = "Clustering")
# my_notebook.add(frame6, text = "Plotting")
my_notebook.add(frame7, text = "Results")
#Start frame1
myLabel1 = Label(frame1, text = " ").pack()
myLabel2 = Label(frame1, text = " ").pack()
myLabel3 = Label(frame1, text = " ").pack()
myLabel4 = Label(frame1, text = " ").pack()
myLabel5 = Label(frame1, text = "In case you choose your own data, please enter the following input variables", font = ("calibri",14) ).pack()
myLabel6 = Label(frame1, text = " ").pack()
myLabel7 = Label(frame1, text = " ").pack()
#Upload the CSV file option
def UploadAction(event=None):
    filename = filedialog.askopenfile(mode='r', filetypes=[('CSV files', '*.csv')])
print('Selected:', filename)
# inputlabel = Label(frame1, text = "The file you uploaded is" + filename).pack()
# myLabelprint = Label(frame1, text = "The file you uploaded is"+ filename).pack()
return filename
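# A minimal sketch (assuming the upload is a plain comma-separated file) of
# loading the object returned by UploadAction into pandas; _load_uploaded_csv
# is a hypothetical helper, not part of the original app, and it relies on the
# `import pandas as pd` already at the top of this module.
def _load_uploaded_csv(file_obj):
    """Read an uploaded CSV file object into a DataFrame (None if cancelled)."""
    if file_obj is None:
        return None
    return pd.read_csv(file_obj)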
button = tk.Button(frame1, text='Upload csv file', command=UploadAction)
button.pack()
# def open_file():
# file_path = askopenfile(mode='r', filetypes=[('all files', '*csv')])
# if file_path is not None:
# pass
# def uploadFiles():
# pb1 = Progressbar(
# frame1,
# orient=HORIZONTAL,
# length=300,
# mode='determinate'
# )
# pb1.grid(row=4, columnspan=3, pady=20)
# for i in range(5):
# frame1.update_idletasks()
# pb1['value'] += 20
# time.sleep(1)
# pb1.destroy()
# Label(frame1, text='File Uploaded Successfully!', foreground='green').grid(row=4, columnspan=3, pady=10)
# adhar = Label(
# frame1,
# text='Upload Your Dataset '
# )
# # adhar.grid(row=0, column=0, padx=10)
# print(uploadFiles)
# adharbtn = Button(
# frame1,
# text ='Choose File',
# command = lambda:open_file()
# ).pack()
# # adharbtn.grid(row=0, column=1)
# myLabel131 = Label(frame1, text = " ").pack()
# myLabel132 = Label(frame1, text = " ").pack()
# #this button is responsible about taking the uploaded file and pass it yo the system
# upld = Button(
# frame1,
# text='Upload Files',
# command=uploadFiles
# ).pack()
# # upld.grid(row=3, columnspan=3, pady=10)
myLabel8 = Label(frame1, text = " ").pack()
myLabel8 = Label(frame1, text = " ").pack()
#######################Upload file new code ############
myLabel10 = Label(frame0, text = " ").pack()
myLabel11 = Label(frame0, text = " ").pack()
myLabel12 = Label(frame0, text = " ").pack()
myLabel13 = Label(frame0, text = " ").pack()
myLabel14 = Label(frame0, text = " ").pack()
myLabel15 = Label(frame0, text = " ").pack()
myLabel16 = Label(frame0, text = " ").pack()
# myLabel17 = Label(frame0, text = " ").pack()
# myLabel18 = Label(frame0, text = " ").pack()
# myLabel19 = Label(frame0, text = " ").pack()
# myLabel20 = Label(frame0, text = " ").pack()
# myLabel21 = Label(frame0, text = " ").pack()
# myLabel22= Label(frame0, text = " ").pack()
# myLabel23 = Label(frame0, text = " ").pack()
# myLabel24 = Label(frame0, text = " ").pack()
myLabel25 = Label(frame0, text = "Welcome to the Fairness system", font = ("Forte",40) ).pack()
myLabel26 = Label(frame0, text = " ").pack()
myLabel27 = Label(frame0, text = " ").pack()
myLabel28 = Label(frame0, text = " ").pack()
myLabel29 = Label(frame0, text = " ").pack()
myLabel30 = Label(frame0, text = " ").pack()
myLabel31 = Label(frame0, text = " ").pack()
myLabel32 = Label(frame0, text = "Masters Student:", font = ("calibri",14) ).pack()
myLabel33 = Label(frame0, text = "Begum Hattatoglu", font = ("calibri",12) ).pack()
myLabel34 = Label(frame0, text = " ").pack()
myLabel35 = Label(frame0, text = " ").pack()
myLabel36 = Label(frame0, text = "Supervised by:", font = ("calibri",14)).pack()
myLabel37 = Label(frame0, text = "Hakim Qahtan", font = ("calibri",12) ).pack()
myLabel38 = Label(frame0, text = " ").pack()
myLabel39 = Label(frame0, text = " ").pack()
myLabel40 = Label(frame0, text = "Copyrights", font = ("calibri",11) ).pack()
myLabel41 = Label(frame0, text = " @Utrecht University", font = ("calibri",11) ).pack()
# canvas = Canvas(frame0, width = 2000, height =800)
# canvas.pack()
# img = ImageTk.PhotoImage(Image.open("C:/Users/Khwai001/Fairness/Project Codes/Implementation/GUI_test/assets/Fairness_Logo_scale.jpg"))
# canvas.create_image(5, 5, anchor=NW, image=img)
#############################################
# myLabel1 = Label(frame1, text = "Upload your data here").pack()
# e = Entry(frame1, width = 55)
# e.pack()
# e.insert(0, "")
# Upload = Button(frame1, text = "Upload")
# Upload.pack()
#############################################
# myLabel1 = Label(frame1, text = " ").pack()
# myLabel1 = Label(frame1, text = " ").pack()
input1 = Label(frame1, text = "Sensitive attribute 1", font = ("calibri",10) ).pack()
# input2 = Label(frame1, text = "Input is in list format").pack()
e1 = Entry(frame1, width = 10)
e1.pack()
e1.insert(0,"")
####################
input8 = Label(frame1, text = "Sensitive attribute 2", font = ("calibri",10) ).pack()
# input8 = Label(frame1, text = "Input is in list format").pack()
e5 = Entry(frame1, width = 10)
e5.pack()
e5.insert(0, "")
myLabel42 = Label(frame1, text = " ").pack()
myLabel43 = Label(frame1, text = " ").pack()
####################
input3 = Label(frame1, text = "Decision variable" ).pack()
input4 = Label(frame1, text = "String input").pack()
e2= Entry(frame1, width = 20)
e2.pack()
e2.insert(0, "")
myLabel44 = Label(frame1, text = " ").pack()
# myLabel45 = Label(frame1, text = " ").pack()
####################
input5 = Label(frame1, text = "Favorable label for sensetive attribute 1" ).pack()
input6 = Label(frame1, text = "Binary (0, 1)").pack()
e3= Entry(frame1, width = 20)
e3.pack()
e3.insert(0, "")
myLabel46 = Label(frame1, text = " ").pack()
# myLabel47 = Label(frame1, text = " ").pack()
####################
input7 = Label(frame1, text = "Favorable label for sensetive attribute 2" ).pack()
input8 = Label(frame1, text = "Binary (0, 1)").pack()
e4= Entry(frame1, width = 20 )
e4.pack()
e4.insert(0, "")
myLabel7 = Label(frame1, text = " ").pack()
# myLabel8 = Label(frame1, text = " ").pack()
proceed = Button(frame1, text = "Proceed" )
proceed.pack()
###### End of frame 1 ######
######################################################################################################
#Connecting to the data
###### Start of frame 2 ######
#create the list box to take the dataset
# listbox = Listbox(frame2)
# listbox.pack(pady = 70)
# listbox.insert(END, "German Dataset")
# listbox.insert(END, "Compas Dataset")
# listbox.insert(END, "Adult Dataset")
# def headclick():
# # if datachoice == 1:
# # german = pd.read_csv ('german.csv')
# # print(frame2, german)
# with open('german.csv', 'r') as file:
# reader = csv.reader(file)
# for row in reader:
# tkinter.messagebox.showinfo(frame2, print(row))
# elif datachoice == 2:
# compas = pd.read_csv ('compas-scores-two-years_original.csv')
# print (frame2,compas)
# elif datachoice == 3:
# adult = pd.read_csv ('adult.csv')
# print (frame2, adult)
myLabel1 = Label(frame1, text = " ").pack()
myLabel1 = Label(frame1, text = " ").pack()
def delete():
listbox.delete(ANCHOR)
def select():
label1.config(text = listbox.get(ANCHOR))
# alldata = np.vstack((X_train_reduc[0], X_train_reduc[1]))
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
datachoice = IntVar()
label1 = Label(frame2, text = 'Choose one of the datasets', font = ("calibri",18))
label1.pack(pady= 10)
#Drop Down menu:
clicked = StringVar()
clicked.set("Datasets")
# myLabel1 = Label(frame2, text = " ").pack()
# myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame1, text = " ").pack()
myLabel1 = Label(frame1, text = " ").pack()
def germanshow():
dataset_orig, privileged_groups, unprivileged_groups = aif_data("german", True)
sens_attr = ['age', 'sex']
decision_label = 'credit'
fav_l = 1
unfav_l = 0
orig_df, num_list, cat_list = preprocess(dataset_orig, sens_attr, decision_label)
# _, pg, upg, ds_orig = data_load("German")
# orig_df = mypreprocess(ds_orig)
tot_rows = 5
tot_cols = len(orig_df.columns)
h_data = orig_df.head(5)
win = tk.Toplevel()
h = Scrollbar(win, orient = 'horizontal')
win.wm_title("German Dataset Head")
t = Table(win, h_data, tot_rows, tot_cols)
l = tk.Label(win, text = t)
l.grid(row=0, column=0)
# return orig_df, privileged_groups, unprivileged_groups
def adultshow():
dataset_orig, privileged_groups, unprivileged_groups = aif_data("adult", True)
# orig = dataset_orig.convert_to_dataframe()
# print(orig[0].columns)
sens_attr = ['race', 'sex']
decision_label = 'Income Binary'
fav_l = 1
unfav_l = 0
orig_df, num_list, cat_list = preprocess(dataset_orig, sens_attr, decision_label)
# print(orig_df)
tot_rows = 5
tot_cols = len(orig_df.columns)
h_data = orig_df.head(5)
win = tk.Toplevel()
win.wm_title("Adult Dataset Head")
t = Table(win, h_data, tot_rows, tot_cols)
l = tk.Label(win, text = t)
l.grid(row=0, column=0)
return orig_df, privileged_groups, unprivileged_groups
def compasshow():
dataset_orig, privileged_groups, unprivileged_groups = aif_data("compas", True)
sens_attr = ['race', 'sex']
decision_label = 'two_year_recid'
fav_l = 1
unfav_l = 0
orig_df, num_list, cat_list = preprocess(dataset_orig, sens_attr, decision_label)
tot_rows = 5
tot_cols = len(orig_df.columns)
h_data = orig_df.head(5)
win = tk.Toplevel()
win.wm_title("Compas Dataset Head")
t = Table(win, h_data, tot_rows, tot_cols)
l = tk.Label(win, text = t)
l.grid(row=0, column=0)
return orig_df, privileged_groups, unprivileged_groups
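# A hedged refactoring sketch: germanshow/adultshow/compasshow above differ only
# in the dataset key, sensitive attributes, decision label and window title, so
# the popup logic can be written once. The helper name is hypothetical; the
# parameter values would be the ones used in the three functions above.
def _demo_show_head(name, sens_attr, decision_label, title):
    dataset_orig, privileged_groups, unprivileged_groups = aif_data(name, True)
    orig_df, _, _ = preprocess(dataset_orig, sens_attr, decision_label)
    win = tk.Toplevel()
    win.wm_title(title)
    Table(win, orig_df.head(5), 5, len(orig_df.columns))
    return orig_df, privileged_groups, unprivileged_groups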
germanbutton = Radiobutton(frame2, text = "German Dataset",variable = datachoice, value = 1, command = germanshow).pack()
adultbutton = Radiobutton(frame2, text = "Adult Dataset",variable = datachoice, value = 2, command = adultshow).pack()
compasbutton = Radiobutton(frame2, text = "Compas Dataset",variable = datachoice, value = 3, command = compasshow).pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
myLabel1 = Label(frame2, text = " ").pack()
# # frame2.add_separator()
# listbutton = Button(frame2, text="Delete")
# listbutton.pack(pady = 30, padx = 40)
# previuosbutton = Button(frame2, text="<<Previous", command = select)
# previuosbutton.pack(pady = 30, padx =10)
# nextbutton = Button(frame2, text="Next>>")
# nextbutton.pack(pady = 30, padx = 70)
###### End of frame 2 ######
###### Start of frame 3 ######
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = "Start Exploring your dataset", font = ("calibri",18) ).pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
def compute_initial_DI_Ratio(dataset = 'german'):
dataset_orig, privileged_groups, unprivileged_groups = aif_data(dataset, False)
metric_orig = BinaryLabelDatasetMetric(dataset_orig,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
return metric_orig, privileged_groups, unprivileged_groups
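# Worked toy example of the ratio that metric_orig.disparate_impact() reports:
# DI = P(favorable | unprivileged) / P(favorable | privileged). The counts are
# invented purely for illustration; values below ~0.8 are commonly read as
# adverse impact (the "four-fifths rule").
def _demo_disparate_impact():
    fav_unpriv, n_unpriv = 30, 100   # unprivileged group: 30% favorable outcomes
    fav_priv, n_priv = 60, 100       # privileged group: 60% favorable outcomes
    return (fav_unpriv / n_unpriv) / (fav_priv / n_priv)   # 0.3 / 0.6 = 0.5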
# def data_load(data_name = "german"):
# di, privileged_groups, unprivileged_groups, ds_orig = compute_initial_DI_Ratio(data_name)
# dataset_orig, privileged_groups, unprivileged_groups = aif_data(data_name, False)
# return di, privileged_groups, unprivileged_groups, ds_orig
# disp_imp, pg, upg, ds_orig = data_load()
def impratio():
data_name = "compas"
di, privileged_groups, unprivileged_groups = compute_initial_DI_Ratio(data_name)
win = tk.Toplevel()
win.wm_title("Impact ratio")
# label1 = tk.Label("Disparate impact (of original labels) between unprivileged and privileged groups = %f")
l = tk.Label(win, text = di.disparate_impact())
# label11 = tk.Label("Privileged_groups=")
m = tk.Label(win, text = privileged_groups)
# label12 = tk.Label("Unprivileged_groups=")
m2 = tk.Label(win, text = unprivileged_groups)
row_nr = 0
# label1.grid(row=row_nr, column=0)
# row_nr += 1
l.grid(row=row_nr, column=0)
row_nr += 1
# label11.grid(row=row_nr, column=0)
# row_nr += 1
m.grid(row=row_nr, column=0)
row_nr += 1
# label12.grid(row=row_nr, column=0)
# row_nr += 1
m2.grid(row=row_nr, column=0)
row_nr += 1
b = ttk.Button(win, text="Okay", command=win.destroy)
b.grid(row=row_nr, column=0)
ratio = Button(frame3, text = "Disparate Impact ratio", command = impratio)
ratio.pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
def demographic():
data_name = "compas"
di, privileged_groups, unprivileged_groups = compute_initial_DI_Ratio(data_name)
win = tk.Toplevel()
win.wm_title("Demographic Parity Difference")
l = tk.Label(win, text = di.statistical_parity_difference())
row_nr = 0
# label2.grid(row=row_nr, column=0)
# row_nr += 1
l.grid(row=row_nr, column=0)
row_nr += 1
b = ttk.Button(win, text="Okay", command=win.destroy)
b.grid(row=row_nr, column=0)
Demo = Button(frame3, text = "Demographic Parity Difference", command = demographic)
Demo.pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
def consistency():
data_name = "german"
di, privileged_groups, unprivileged_groups = compute_initial_DI_Ratio(data_name)
win = tk.Toplevel()
win.wm_title("Consistency")
l = tk.Label(win, text = di.consistency())
row_nr = 0
l.grid(row=row_nr, column=1)
row_nr += 1
b = ttk.Button(win, text="Okay", command=win.destroy)
b.grid(row=row_nr, column=1)
Consistency = Button(frame3, text = "Consistency", command =consistency)
Consistency.pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
myLabel1 = Label(frame3, text = " ").pack()
def info():
win = tk.Toplevel()
win.wm_title("Information")
l = tk.Label(win, text="PLEASE ADD THE DESCRIPTION HERE ")
l.grid(row=0, column=0)
b = ttk.Button(win, text="Okay", command=win.destroy)
b.grid(row=1, column=0)
info6 = Button(frame3, text = "Read more!", command = info )
info6.pack()
###### End of frame3 ######
###### Start of frame 4 ######
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = "Clustering Methods", font = ("calibri",21)).pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel14 = Label(frame4, text = "Fuzzy C-means plottings", font = ("calibri",16)).pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
def fuzzy_2d_plot():
colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen']
fig1, axes1 = plt.subplots(3, 3, figsize=(8, 8))
fpcs = []
#checking for the optimal num of clusters with FPC plots
for ncenters, ax in enumerate(axes1.reshape(-1), 2):
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(
alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)
# Store fpc values for later
fpcs.append(fpc)
# Plot assigned clusters, for each data point in training set
cluster_membership = np.argmax(u, axis=0)
for j in range(ncenters):
ax.plot(X_train_reduc[0][cluster_membership == j],
X_train_reduc[1][cluster_membership == j], '.', color=colors[j])
# Mark the center of each fuzzy cluster
for pt in cntr:
ax.plot(pt[0], pt[1], 'rs')
ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc))
ax.axis('off')
fig1.tight_layout()
#fpc plot per number of clusters
fig2, ax2 = plt.subplots()
ax2.plot(np.r_[2:11], fpcs)
ax2.set_xlabel("Number of centers")
ax2.set_ylabel("Fuzzy partition coefficient")
return fig1, fig2
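# A minimal sketch of turning the FPC curve above into a cluster count: pick
# the number of centers whose fuzzy partition coefficient is largest (FPC lies
# in [0, 1]; higher means a crisper partition). Assumes fpcs[0] corresponds to
# first_k centers, matching the enumerate(..., 2) loop above.
def _demo_best_k_from_fpc(fpcs, first_k=2):
    return first_k + int(np.argmax(fpcs))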
def twoDplots():
win = tk.Toplevel()
win.wm_title("Two Dimensional Plotting")
l = tk.Label(win, text = fuzzy_2d_plot())
l.grid(row=0, column=0)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=0)
return
head = Button(frame4, text = "2D plottings", command = twoDplots)
head.pack()
myLabel1 = Label(frame4, text = " ").pack()
# def cluster(n_clust, X_train_reduc):
# cntr, u_orig, _, _, _, _, _ = fuzz.cluster.cmeans(alldata, n_clust, 2, error=0.005,
# maxiter=5000)
# # u: final fuzzy-partitioned matrix, u0: initial guess at fuzzy c-partitioned matrix,
# # d: final euclidean distance matrix, jm: obj func hist, p: num of iter run,
# #fpc: fuzzy partition coefficient
# u, u0, d, jm, p, fpc = fuzz.cluster.cmeans_predict(X_train_reduc.T, cntr, 2, error=0.005,
# maxiter=5000)
# clusters = np.argmax(u, axis=0) # Hardening for silhouette
# return clusters, cntr
def silhouette_plot():
from tqdm import tqdm
n_clusters = []
silhouette_scores = []
for i in tqdm(range(2, 10)):
try:
cntr, u_orig, _, _, _, _, _ = fuzz.cluster.cmeans(alldata, i, 2, error=0.005,
maxiter=5000)
u, u0, d, jm, p, fpc = fuzz.cluster.cmeans_predict(X_train_reduc.T, cntr, 2, error=0.005,
maxiter=5000)
clusters = np.argmax(u, axis=0)
silhouette_val = silhouette_score(X_train_reduc, clusters,
metric='euclidean')
silhouette_scores.append(silhouette_val)
n_clusters.append(i)
        except Exception:
print(f"Can't cluster with {i} clusters")
plt.scatter(x=n_clusters, y=silhouette_scores)
plt.plot(n_clusters, silhouette_scores)
plt.show()
def silhouette():
win = tk.Toplevel()
win.wm_title("Silhouette")
l = tk.Label(win, text = silhouette_plot())
l.grid(row=0, column=0)
return
myLabel1 = Label(frame4, text = " ").pack()
head = Button(frame4, text = "Silhouette plot", command = silhouette)
head.pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
def numofclusters():
    n_clust = int(e6.get())   # read the requested cluster count from the entry below
    win = tk.Toplevel()
    win.wm_title("Number of Clusters")
    l = tk.Label(win, text = "Number of Clusters: %d" % n_clust)
    l.grid(row=0, column=3)
    b = tk.Button(win, text="Okay", command=win.destroy)
    b.grid(row=3, column=3)
    return
head = Button(frame4, text = "Number of clusters plot", command = numofclusters )
head.pack()
myLabel1 = Label(frame4, text = " ").pack()
myLabel1 = Label(frame4, text = " ").pack()
input6 = Label(frame4, text = "Input number of clusters").pack()
e6 = Entry(frame4, width = 10)
e6.pack()
'-------------------------------------------------------------------------'
# clf = e6.get()
# clf = RandomForestClassifier()
'---------------------------------------------------'
def cluster(n_clust, X_train_reduc):
cntr, u_orig, _, _, _, _, _ = fuzz.cluster.cmeans(alldata, n_clust, 2, error=0.005,
maxiter=5000)
u, u0, d, jm, p, fpc = fuzz.cluster.cmeans_predict(X_train_reduc.T, cntr, 2, error=0.005,
maxiter=5000)
clusters = np.argmax(u, axis=0) # Hardening for silhouette
return clusters, cntr
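# Hedged usage sketch of the cmeans/cmeans_predict pair on synthetic points,
# since alldata and X_train_reduc are produced elsewhere in the pipeline.
# skfuzzy expects data shaped (features, samples), hence the transpose.
def _demo_cluster_usage(n_clust=3):
    rng = np.random.default_rng(0)
    pts = rng.normal(size=(200, 2))            # 200 samples, 2 reduced features
    cntr, u, _, _, _, _, fpc = fuzz.cluster.cmeans(
        pts.T, n_clust, 2, error=0.005, maxiter=1000)
    return np.argmax(u, axis=0), cntr, fpc     # hard labels, centers, FPC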
'---------------------------------------------------'
def clusnum():
win = tk.Toplevel()
win.wm_title("Two Dimensional Plotting")
l = tk.Label(win, text = "Output to number of clusters")
l.grid(row=0, column=0)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=0)
return
myLabel1 = Label(frame4, text = " ").pack()
head = Button(frame4, text = "Take clusters number", command = clusnum)
head.pack()
###### End of frame 4 ######
###### Start of frame 5 ######
myLabel1 = Label(frame5, text = " ").pack()
myLabel1 = Label(frame5, text = " ").pack()
myLabel1 = Label(frame5, text = " ").pack()
myLabel1 = Label(frame5, text = " ").pack()
myLabel1 = Label(frame5, text = "Choose the classifications algorithm", font = ("calibri",21) ).pack()
def LR():
return
def LRtwoDplots():
win = tk.Toplevel()
win.wm_title("Two Dimensional Plotting")
l = tk.Label(win, text ="Logistic Regression Plotting ")
l.grid(row=0, column=0)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=0)
return
head = Button(frame5, text = "Logistic Regression", command = LRtwoDplots)
head.pack()
myLabel1 = Label(frame5, text = " ").pack()
def RF():
clf = RandomForestClassifier()
# NOTE: clf must come from the user!
# Getting the baseline performance results from the imbalanced dataset
# Note: the function is created based on the assumption that the X's have sub_labels
# Instantiate the desired classifier obj to train the classification models
baseline_stats, cm, ratio_table, preds = baseline_metrics(clf, X_train, X_test,
y_train, y_test, sens_attr,
fav_l, unfav_l)
    return baseline_stats, cm, ratio_table
def RFtwoDplots():
win = tk.Toplevel()
win.wm_title("Two Dimensional Plotting")
l = tk.Label(win, text ="Random Forest Plotting ")
l.grid(row=0, column=0)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=0)
return
head = Button(frame5, text = "Random Forest", command = RFtwoDplots)
head.pack()
myLabel1 = Label(frame5, text = " ").pack()
def GB():
clf = GradientBoostingClassifier()
baseline_stats, cm, ratio_table, preds = baseline_metrics(clf, X_train, X_test,
y_train, y_test, sens_attr,
fav_l, unfav_l)
return baseline_stats, cm, ratio_table
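# A minimal, self-contained baseline sketch using only scikit-learn; the real
# baseline_metrics() above additionally derives the fairness tables, which are
# project-specific and not reproduced here.
def _demo_baseline_fit(X_train, X_test, y_train, y_test):
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.metrics import accuracy_score
    clf = GradientBoostingClassifier().fit(X_train, y_train)
    return accuracy_score(y_test, clf.predict(X_test))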
def GBtwoDplots():
win = tk.Toplevel()
win.wm_title("Two Dimensional Plotting")
l = tk.Label(win, text ="Gradient Boosting Plotting ")
l.grid(row=0, column=1)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=1)
return
head = Button(frame5, text = "Gradient Boosting", command = GBtwoDplots)
head.pack()
myLabel1 = Label(frame5, text = " ").pack()
# myLabel1 = Label(frame5, text = " ").pack()
# myLabel1 = Label(frame5, text = " ").pack()
myLabelresults = Label(frame5, text = "The Baseline Statistics", font = ("calibri",21) ).pack()
def fairnessmatrix():
win = tk.Toplevel()
win.wm_title("Fairness Matrix")
l = tk.Label(win, text ="Fairness Matrix")
l.grid(row=0, column=2)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=2)
    return
head = Button(frame5, text = "Fairness and performance Metrics", command = fairnessmatrix)
head.pack()
myLabel1 = Label(frame5, text = " ").pack()
def commatrix():
# cm = classified_metric.binary_confusion_matrix()
win = tk.Toplevel()
win.wm_title("Binary Confusion Matrix")
l = tk.Label(win, text ="Binary Confusion Matrix")
l.grid(row=0, column=1)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=1)
    return
head = Button(frame5, text = "Confusion Matrix", command = commatrix)
head.pack()
myLabel1 = Label(frame5, text = " ").pack()
def ratios():
win = tk.Toplevel()
win.wm_title("Ratio Matrix")
l = tk.Label(win, text ="Ratios matrix")
l.grid(row=0, column=1)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=1)
return
head = Button(frame5, text = "Ratios", command = ratios)
head.pack()
###### End of frame 5 ######
###### Start of frame 6 ######
###### End of frame 6 ######
###### Start of frame 7 ######
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = "Fairness and performance metrics tables", font = ("calibri",21) ).pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
def metrics_strategy1(X_test, X_test_pred1, y_test, sens_attr, fav_l, unfav_l):
metrics_table1, cm1, ratio_t1 = metrics_calculate(X_test, X_test_pred1, y_test,
sens_attr, fav_l, unfav_l)
return metrics_table1, cm1, ratio_t1
def m1():
win = tk.Toplevel()
win.wm_title("Strategy 1")
l = tk.Label(win, text ="Metric Table: " )
l1 = tk.Label(win, text = "Confusion Matrix: " )
l2 = tk.Label(win, text = "Ratio Table: " )
l.grid(row=0, column=3)
l1.grid(row=1, column=3)
l2.grid(row=2, column=3)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=3)
return
myLabel14 = Label(frame7, text = "Confusion matrix", font = ("calibri",14) ).pack()
myLabel1 = Label(frame7, text = " ").pack()
head = Button(frame7, text = "Strategy 1", command = m1)
head.pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
def metrics_strategy2(X_test, X_test_pred2, y_test, sens_attr, fav_l, unfav_l):
metrics_table2, cm2, ratio_t2 = metrics_calculate(X_test, X_test_pred2, y_test,
sens_attr, fav_l, unfav_l)
return metrics_table2, cm2, ratio_t2
def m2():
win = tk.Toplevel()
win.wm_title("Strategy 2")
l = tk.Label(win, text ="Metric Table: " )
l1 = tk.Label(win, text = "Confusion Matrix: " )
l2 = tk.Label(win, text = "Ratio Table: " )
l.grid(row=0, column=3)
l1.grid(row=1, column=3)
l2.grid(row=2, column=3)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=3)
return
myLabel14 = Label(frame7, text = "Subgroup ratio table", font = ("calibri",14) ).pack()
myLabel1 = Label(frame7, text = " ").pack()
head = Button(frame7, text = "Strategy 2", command = m2)
head.pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
def metrics_strategy3(X_test, X_test_pred3, y_test, sens_attr, fav_l, unfav_l):
metrics_table3, cm3, ratio_t3 = metrics_calculate(X_test, X_test_pred3, y_test,
sens_attr, fav_l, unfav_l)
return metrics_table3, cm3, ratio_t3
def m3():
win = tk.Toplevel()
win.wm_title("Strategy 3")
l = tk.Label(win, text ="Metric Table: " )
l1 = tk.Label(win, text = "Confusion Matrix:" )
l2 = tk.Label(win, text = "Ratio Table: " )
l.grid(row=0, column=3)
l1.grid(row=1, column=3)
l2.grid(row=2, column=3)
b = tk.Button(win, text="Okay", command=win.destroy)
b.grid(row=3, column=3)
return
myLabel14 = Label(frame7, text = "Gradient Boosting Trees", font = ("calibri",14) ).pack()
myLabel1 = Label(frame7, text = " ").pack()
head = Button(frame7, text = "Strategy 3", command = m3)
head.pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
myLabel1 = Label(frame7, text = " ").pack()
###### End of frame 7 ######
#############################
#Bulding up the menu
#############################
file_menu = Menu(my_menu)
#############################
my_menu.add_cascade(label = "File", menu= file_menu) #new bar tick
file_menu.add_command(label="New", command = file_menu) #new bar tick
file_menu.add_separator()
file_menu.add_command(label="Exit", command = root.quit)
##############################
edit_menu = Menu(my_menu)
my_menu.add_cascade(label= "Edit", menu = edit_menu) #new bar tick
edit_menu.add_command(label = "Cut", command = our_command())
edit_menu.add_separator()
edit_menu.add_command(label = "Copy", command = our_command())
##############################
my_menu.add_cascade(label = "Help")
##############################
options_menu = Menu(my_menu)
my_menu.add_cascade(label= "Options", menu = options_menu) #new bar tick
options_menu.add_command(label = "Find", command = our_command())
options_menu.add_separator()
options_menu.add_command(label = "Find next", command = our_command())
root.mainloop() | 66.982759 | 280 | 0.324003 |
4c6fcb7fc0e8b4fa908a11717f5397ff7dfd9bfb | 2,219 | py | Python | irekua_database/models/terms/entailments.py | CONABIO-audio/irekua-database | abaf3eb3c5273cdb973c7ac1b921ab2f9759042c | [
"BSD-4-Clause"
] | null | null | null | irekua_database/models/terms/entailments.py | CONABIO-audio/irekua-database | abaf3eb3c5273cdb973c7ac1b921ab2f9759042c | [
"BSD-4-Clause"
] | 18 | 2019-10-31T21:41:42.000Z | 2022-03-12T00:03:54.000Z | irekua_database/models/terms/entailments.py | IslasGECI/irekua-database | abaf3eb3c5273cdb973c7ac1b921ab2f9759042c | [
"BSD-4-Clause"
] | 1 | 2021-05-06T19:38:21.000Z | 2021-05-06T19:38:21.000Z | from django.db.models import JSONField
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
from irekua_database.models.object_types.entailment_types import EntailmentType
from irekua_database.models import base
class Entailment(base.IrekuaModelBase):
source = models.ForeignKey(
'Term',
related_name='entailment_source',
db_column='source_id',
verbose_name=_('source'),
help_text=_('Source of entailment'),
on_delete=models.CASCADE,
blank=False)
target = models.ForeignKey(
'Term',
related_name='entailment_target',
db_column='target_id',
verbose_name=_('target'),
help_text=_('Target of entailment'),
on_delete=models.CASCADE,
blank=False)
metadata = JSONField(
db_column='metadata',
verbose_name=_('metadata'),
help_text=_('Metadata associated to entailment'),
blank=True,
null=True)
class Meta:
verbose_name = _('Entailment')
verbose_name_plural = _('Entailments')
ordering = ['source']
def __str__(self):
msg = '%(source)s => %(target)s'
params = dict(
source=str(self.source),
target=str(self.target))
return msg % params
def clean(self):
try:
entailment_type = EntailmentType.objects.get(
source_type=self.source.term_type,
target_type=self.target.term_type)
except EntailmentType.DoesNotExist:
msg = _('Entailment between types %(source_type)s and %(target_type)s is not possible')
params = dict(
source_type=self.source.term_type,
target_type=self.target.term_type)
raise ValidationError({'target': msg % params})
try:
entailment_type.validate_metadata(self.metadata)
except ValidationError as error:
msg = _('Invalid entailment metadata. Error %(error)s')
params = dict(error=str(error))
raise ValidationError({'metadata': msg % params})
super(Entailment, self).clean()
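# Hedged usage sketch: clean() above runs automatically through Django's
# full_clean() before saving; source_term and target_term are hypothetical
# Term instances supplied by the caller.
def _demo_validate_entailment(source_term, target_term):
    entailment = Entailment(source=source_term, target=target_term, metadata=None)
    entailment.full_clean()   # triggers clean(), raising ValidationError on a bad pair
    return entailment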
| 33.119403 | 99 | 0.628211 |
9bcec6b41983fa0bd7ee3735aceb09fce8b265de | 61,211 | py | Python | tests/unit/gapic/compute_v1/test_region_autoscalers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/compute_v1/test_region_autoscalers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/compute_v1/test_region_autoscalers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_autoscalers import RegionAutoscalersClient
from google.cloud.compute_v1.services.region_autoscalers import pagers
from google.cloud.compute_v1.services.region_autoscalers import transports
from google.cloud.compute_v1.services.region_autoscalers.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionAutoscalersClient._get_default_mtls_endpoint(None) is None
assert (
RegionAutoscalersClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
RegionAutoscalersClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionAutoscalersClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionAutoscalersClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionAutoscalersClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize("client_class", [RegionAutoscalersClient,])
def test_region_autoscalers_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize("client_class", [RegionAutoscalersClient,])
def test_region_autoscalers_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_region_autoscalers_client_get_transport_class():
transport = RegionAutoscalersClient.get_transport_class()
available_transports = [
transports.RegionAutoscalersRestTransport,
]
assert transport in available_transports
transport = RegionAutoscalersClient.get_transport_class("rest")
assert transport == transports.RegionAutoscalersRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"),],
)
@mock.patch.object(
RegionAutoscalersClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionAutoscalersClient),
)
def test_region_autoscalers_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RegionAutoscalersClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RegionAutoscalersClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
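# A minimal sketch of the environment switch the parametrized tests below
# exercise: with GOOGLE_API_USE_MTLS_ENDPOINT="always" the client should hand
# the mTLS host to its transport. The transport __init__ is patched exactly as
# in the tests above, so nothing touches the network.
def _demo_force_mtls_endpoint():
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(
            transports.RegionAutoscalersRestTransport, "__init__", return_value=None
        ) as patched:
            RegionAutoscalersClient()
            return patched.call_args[1]["host"]  # == DEFAULT_MTLS_ENDPOINT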
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
RegionAutoscalersClient,
transports.RegionAutoscalersRestTransport,
"rest",
"true",
),
(
RegionAutoscalersClient,
transports.RegionAutoscalersRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
RegionAutoscalersClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionAutoscalersClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_autoscalers_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"),],
)
def test_region_autoscalers_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"),],
)
def test_region_autoscalers_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteRegionAutoscalerRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete(
project="project_value",
region="region_value",
autoscaler="autoscaler_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
assert "autoscaler_value" in http_call[1] + str(body)
def test_delete_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteRegionAutoscalerRequest(),
project="project_value",
region="region_value",
autoscaler="autoscaler_value",
)
def test_get_rest(
transport: str = "rest", request_type=compute.GetRegionAutoscalerRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112),
creation_timestamp="creation_timestamp_value",
description="description_value",
id="id_value",
kind="kind_value",
name="name_value",
recommended_size=1693,
region="region_value",
scaling_schedule_status={
"key_value": compute.ScalingScheduleStatus(
last_start_time="last_start_time_value"
)
},
self_link="self_link_value",
status=compute.Autoscaler.Status.ACTIVE,
status_details=[compute.AutoscalerStatusDetails(message="message_value")],
target="target_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Autoscaler.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Autoscaler)
assert response.autoscaling_policy == compute.AutoscalingPolicy(
cool_down_period_sec=2112
)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.recommended_size == 1693
assert response.region == "region_value"
assert response.scaling_schedule_status == {
"key_value": compute.ScalingScheduleStatus(
last_start_time="last_start_time_value"
)
}
assert response.self_link == "self_link_value"
assert response.status == compute.Autoscaler.Status.ACTIVE
assert response.status_details == [
compute.AutoscalerStatusDetails(message="message_value")
]
assert response.target == "target_value"
assert response.zone == "zone_value"
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Autoscaler()
# Wrap the value into a proper Response obj
json_return_value = compute.Autoscaler.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get(
project="project_value",
region="region_value",
autoscaler="autoscaler_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
assert "autoscaler_value" in http_call[1] + str(body)
def test_get_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetRegionAutoscalerRequest(),
project="project_value",
region="region_value",
autoscaler="autoscaler_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertRegionAutoscalerRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
autoscaler_resource = compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
)
client.insert(
project="project_value",
region="region_value",
autoscaler_resource=autoscaler_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
assert compute.Autoscaler.to_json(
autoscaler_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_insert_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertRegionAutoscalerRequest(),
project="project_value",
region="region_value",
autoscaler_resource=compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
),
)
def test_list_rest(
transport: str = "rest", request_type=compute.ListRegionAutoscalersRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionAutoscalerList(
id="id_value",
items=[
compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(
cool_down_period_sec=2112
)
)
],
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
)
# Wrap the value into a proper Response obj
json_return_value = compute.RegionAutoscalerList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.items == [
compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
)
]
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.RegionAutoscalerList()
# Wrap the value into a proper Response obj
json_return_value = compute.RegionAutoscalerList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list(
project="project_value", region="region_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
def test_list_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListRegionAutoscalersRequest(),
project="project_value",
region="region_value",
)
def test_list_pager():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Set the response as a series of pages
response = (
compute.RegionAutoscalerList(
items=[
compute.Autoscaler(),
compute.Autoscaler(),
compute.Autoscaler(),
],
next_page_token="abc",
),
compute.RegionAutoscalerList(items=[], next_page_token="def",),
compute.RegionAutoscalerList(
items=[compute.Autoscaler(),], next_page_token="ghi",
),
compute.RegionAutoscalerList(
items=[compute.Autoscaler(), compute.Autoscaler(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.RegionAutoscalerList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
metadata = ()
pager = client.list(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.Autoscaler) for i in results)
pages = list(client.list(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
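# Note on test_list_pager above: the mocked transport returns four pages
# carrying 3 + 0 + 1 + 2 = 6 items with next_page_token values "abc", "def",
# "ghi" and "" (the empty token marks the final page); the response tuple is
# doubled so that the two client.list() calls each consume their own sequence
# of side-effect responses.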
def test_patch_rest(
transport: str = "rest", request_type=compute.PatchRegionAutoscalerRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_patch_rest_from_dict():
test_patch_rest(request_type=dict)
def test_patch_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
autoscaler_resource = compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
)
client.patch(
project="project_value",
region="region_value",
autoscaler_resource=autoscaler_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
assert compute.Autoscaler.to_json(
autoscaler_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_patch_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch(
compute.PatchRegionAutoscalerRequest(),
project="project_value",
region="region_value",
autoscaler_resource=compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
),
)
def test_update_rest(
transport: str = "rest", request_type=compute.UpdateRegionAutoscalerRequest
):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_update_rest_from_dict():
test_update_rest(request_type=dict)
def test_update_rest_flattened():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
autoscaler_resource = compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
)
client.update(
project="project_value",
region="region_value",
autoscaler_resource=autoscaler_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "region_value" in http_call[1] + str(body)
assert compute.Autoscaler.to_json(
autoscaler_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_update_rest_flattened_error():
client = RegionAutoscalersClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update(
compute.UpdateRegionAutoscalerRequest(),
project="project_value",
region="region_value",
autoscaler_resource=compute.Autoscaler(
autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionAutoscalersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionAutoscalersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionAutoscalersClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionAutoscalersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionAutoscalersClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionAutoscalersRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionAutoscalersClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.RegionAutoscalersRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_autoscalers_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionAutoscalersTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_autoscalers_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionAutoscalersTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
"update",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_region_autoscalers_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionAutoscalersTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_region_autoscalers_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionAutoscalersTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_autoscalers_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionAutoscalersTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_region_autoscalers_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionAutoscalersClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_region_autoscalers_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionAutoscalersClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_autoscalers_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionAutoscalersRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_region_autoscalers_host_no_port():
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_region_autoscalers_host_with_port():
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionAutoscalersClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionAutoscalersClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionAutoscalersClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionAutoscalersClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionAutoscalersClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionAutoscalersClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionAutoscalersClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionAutoscalersClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionAutoscalersClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionAutoscalersClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionAutoscalersClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionAutoscalersClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionAutoscalersClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionAutoscalersClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionAutoscalersClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionAutoscalersTransport, "_prep_wrapped_messages"
) as prep:
client = RegionAutoscalersClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionAutoscalersTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionAutoscalersClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| 39.747403 | 122 | 0.688749 |
3a0dabaf5d81e494549ed78ad91d85130c302e95 | 1,905 | py | Python | tests.dev/bundle_list.py | Clarify/clarify_python | 1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb | [
"MIT"
] | 5 | 2015-07-22T18:50:32.000Z | 2020-12-06T12:08:37.000Z | tests.dev/bundle_list.py | Clarify/clarify_python | 1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb | [
"MIT"
] | 1 | 2015-04-15T04:48:44.000Z | 2015-04-18T22:01:41.000Z | tests.dev/bundle_list.py | Clarify/clarify_python | 1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb | [
"MIT"
] | 2 | 2015-03-11T19:04:14.000Z | 2015-03-12T16:12:37.000Z | #!/usr/bin/env python
"""
Some test functions used to sanity check during development. Not
unit tests.
"""
import sys
sys.path.insert(0, '..')
from clarify_python import clarify
def get_first_page_hrefs(client):
"""Print first page of bundle hrefs."""
bundle_list = client.get_bundle_list()
print('*** Available bundles: ' + str(bundle_list['total']))
print('*** Printing first page of hrefs retrieved (max 10)...')
for i in bundle_list['_links']['items']:
print(i['href'])
def get_all_bundle_hrefs(client):
"""Print all bundle hrefs."""
print('*** Printing all available bundle hrefs...')
client.bundle_list_map(print_href)
def get_all_bundles(client):
"""Print all bundles."""
    print('*** Printing all available bundles...')
client.bundle_list_map(print_bundle)
def print_href(client, href):
"""Function to print an href."""
print(href)
def print_bundle(client, href):
"""Function to print an bundle from an href."""
bundle = client.get_bundle(href)
print('* Bundle ' + bundle['id'] + '...')
if 'name' in bundle:
print('name: ' + bundle['name'])
if 'external_id' in bundle:
print('external_id: ' + bundle['external_id'])
if 'notify_url' in bundle:
print('notify_url: ' + bundle['notify_url'])
print('created: ' + bundle['created'])
print('updated: ' + bundle['updated'])
def all_tests(apikey):
"""Set API key and call all test functions."""
client = clarify.Client(apikey)
print('===== get_first_page_hrefs() =====')
get_first_page_hrefs(client)
print('===== get_all_bundle_hrefs() =====')
get_all_bundle_hrefs(client)
print('===== get_all_bundles() =====')
get_all_bundles(client)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: ' + sys.argv[0] + ' <apikey>')
exit(1)
all_tests(sys.argv[1])
| 24.113924 | 67 | 0.628871 |
c2d6cc61bbe0566ef619ab6cbb843c0f9c6d53dd | 1,828 | py | Python | spyder/plugins/projects/widgets/tests/test_project_explorer.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | 2 | 2019-04-25T08:25:37.000Z | 2019-04-25T08:25:43.000Z | spyder/plugins/projects/widgets/tests/test_project_explorer.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | null | null | null | spyder/plugins/projects/widgets/tests/test_project_explorer.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | 1 | 2019-02-18T01:28:51.000Z | 2019-02-18T01:28:51.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for explorer.py
"""
# Standard imports
import os
import os.path as osp
# Test library imports
import pytest
# Local imports
from spyder.plugins.projects.widgets.explorer import ProjectExplorerTest
from spyder.py3compat import to_text_string
@pytest.fixture
def project_explorer(qtbot, request, tmpdir):
"""Setup Project Explorer widget."""
directory = request.node.get_marker('change_directory')
if directory:
project_dir = to_text_string(tmpdir.mkdir('project'))
else:
project_dir = None
project_explorer = ProjectExplorerTest(directory=project_dir)
qtbot.addWidget(project_explorer)
return project_explorer
@pytest.mark.change_directory
def test_change_directory_in_project_explorer(project_explorer, qtbot):
"""Test changing a file from directory in the Project explorer."""
# Create project
project = project_explorer
project_dir = project.directory
# Create a temp project directory and file
project_dir_tmp = osp.join(project_dir, u'測試')
project_file = osp.join(project_dir, 'script.py')
# Create an empty file in the project dir
os.mkdir(project_dir_tmp)
open(project_file, 'w').close()
# Move Python file
project.explorer.treewidget.move(
fnames=[osp.join(project_dir, 'script.py')],
directory=project_dir_tmp)
# Assert content was moved
assert osp.isfile(osp.join(project_dir_tmp, 'script.py'))
def test_project_explorer(project_explorer, qtbot):
"""Run project explorer."""
project = project_explorer
project.resize(250, 480)
project.show()
assert project
if __name__ == "__main__":
pytest.main()
| 26.492754 | 72 | 0.708972 |
7961d51b7d07f9e212c0ba1beeadc1e5ca9d4db4 | 914 | py | Python | rating/models/result.py | bomzheg/equestrian-rating | d880dcb40500b3b78d4c10f387b15ac4a60428ce | [
"MIT"
] | null | null | null | rating/models/result.py | bomzheg/equestrian-rating | d880dcb40500b3b78d4c10f387b15ac4a60428ce | [
"MIT"
] | null | null | null | rating/models/result.py | bomzheg/equestrian-rating | d880dcb40500b3b78d4c10f387b15ac4a60428ce | [
"MIT"
] | null | null | null | from django.db import models
class Result(models.Model):
"""Данные о конкретном случае выполненного норматива."""
fulfilled_standard = models.ForeignKey(
"Standard",
on_delete=models.PROTECT,
related_name="results",
verbose_name="Норматив",
)
    date = models.DateField(verbose_name="Competition date")
    horse_name = models.CharField(max_length=64, verbose_name="Horse")
    athlete_name = models.CharField(max_length=128, verbose_name="Athlete")
    club_name = models.CharField(max_length=64, verbose_name="Club", null=True)
class Meta:
db_table = "results"
verbose_name = "Выполнивший норматив"
verbose_name_plural = "Выполнившие норматив"
def __str__(self):
return (
f"{self.date}: {self.athlete_name} "
f"на лошади по кличке {self.horse_name}"
f"из {self.club_name}"
)
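# Minimal usage sketch (hypothetical objects, shown for illustration only;
# assumes a Standard row already exists):
#
#   standard = Standard.objects.first()
#   Result.objects.create(
#       fulfilled_standard=standard,
#       date=datetime.date(2021, 6, 1),
#       horse_name="Luna",
#       athlete_name="Jane Doe",
#       club_name="River Club",
#   )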
| 32.642857 | 79 | 0.657549 |
8e5affe98958eb983445350540bd65c1731c4b11 | 5,052 | py | Python | tests/core/consensus/test_pot_iterations.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | [
"Apache-2.0"
] | null | null | null | tests/core/consensus/test_pot_iterations.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | [
"Apache-2.0"
] | null | null | null | tests/core/consensus/test_pot_iterations.py | zcomputerwiz/replaceme-blockchain | b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a | [
"Apache-2.0"
] | null | null | null | from pytest import raises
from replaceme.consensus.default_constants import DEFAULT_CONSTANTS
from replaceme.consensus.pos_quality import _expected_plot_size
from replaceme.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from replaceme.util.hash import std_hash
from replaceme.util.ints import uint8, uint64
test_constants = DEFAULT_CONSTANTS.replace(**{"NUM_SPS_SUB_SLOT": 32, "SUB_SLOT_TIME_TARGET": 300})
class TestPotIterations:
def test_is_overflow_block(self):
assert not is_overflow_block(test_constants, uint8(27))
assert not is_overflow_block(test_constants, uint8(28))
assert is_overflow_block(test_constants, uint8(29))
assert is_overflow_block(test_constants, uint8(30))
assert is_overflow_block(test_constants, uint8(31))
with raises(ValueError):
assert is_overflow_block(test_constants, uint8(32))
def test_calculate_sp_iters(self):
ssi: uint64 = uint64(100001 * 64 * 4)
with raises(ValueError):
calculate_sp_iters(test_constants, ssi, uint8(32))
calculate_sp_iters(test_constants, ssi, uint8(31))
def test_calculate_ip_iters(self):
ssi: uint64 = uint64(100001 * 64 * 4)
sp_interval_iters = ssi // test_constants.NUM_SPS_SUB_SLOT
with raises(ValueError):
# Invalid signage point index
calculate_ip_iters(test_constants, ssi, uint8(123), uint64(100000))
sp_iters = sp_interval_iters * 13
with raises(ValueError):
# required_iters too high
calculate_ip_iters(test_constants, ssi, sp_interval_iters, sp_interval_iters)
with raises(ValueError):
# required_iters too high
calculate_ip_iters(test_constants, ssi, sp_interval_iters, sp_interval_iters * 12)
with raises(ValueError):
# required_iters too low (0)
calculate_ip_iters(test_constants, ssi, sp_interval_iters, uint64(0))
required_iters = sp_interval_iters - 1
ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
required_iters = uint64(1)
ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
required_iters = uint64(int(ssi * 4 / 300))
ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
assert sp_iters < ip_iters
# Overflow
sp_iters = sp_interval_iters * (test_constants.NUM_SPS_SUB_SLOT - 1)
ip_iters = calculate_ip_iters(
test_constants,
ssi,
uint8(test_constants.NUM_SPS_SUB_SLOT - 1),
required_iters,
)
assert ip_iters == (sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % ssi
assert sp_iters > ip_iters
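        # Worked example for the non-overflow case above (NUM_SP_INTERVALS_EXTRA
        # is assumed to be 3 in the default constants):
        #   ssi = 100001 * 64 * 4 = 25_600_256
        #   sp_interval_iters = ssi // 32 = 800_008
        #   sp_iters at index 13 = 13 * 800_008 = 10_400_104
        #   ip_iters = sp_iters + 3 * sp_interval_iters + required_iters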
def test_win_percentage(self):
"""
Tests that the percentage of blocks won is proportional to the space of each farmer,
with the assumption that all farmers have access to the same VDF speed.
"""
farmer_ks = {
uint8(32): 100,
uint8(33): 100,
uint8(34): 100,
uint8(35): 100,
uint8(36): 100,
}
farmer_space = {k: _expected_plot_size(uint8(k)) * count for k, count in farmer_ks.items()}
total_space = sum(farmer_space.values())
percentage_space = {k: float(sp / total_space) for k, sp in farmer_space.items()}
wins = {k: 0 for k in farmer_ks.keys()}
total_slots = 50
num_sps = 16
sp_interval_iters = uint64(100000000 // 32)
difficulty = uint64(500000000000)
for slot_index in range(total_slots):
total_wins_in_slot = 0
for sp_index in range(num_sps):
sp_hash = std_hash(slot_index.to_bytes(4, "big") + sp_index.to_bytes(4, "big"))
for k, count in farmer_ks.items():
for farmer_index in range(count):
quality = std_hash(slot_index.to_bytes(4, "big") + k.to_bytes(1, "big") + bytes(farmer_index))
required_iters = calculate_iterations_quality(2 ** 25, quality, k, difficulty, sp_hash)
if required_iters < sp_interval_iters:
wins[k] += 1
total_wins_in_slot += 1
win_percentage = {k: wins[k] / sum(wins.values()) for k in farmer_ks.keys()}
for k in farmer_ks.keys():
# Win rate is proportional to percentage of space
assert abs(win_percentage[k] - percentage_space[k]) < 0.01
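        # Back-of-the-envelope check of the space weighting (hedged: chia's
        # _expected_plot_size(k) is, to our knowledge, (2 * k + 1) * 2 ** (k - 1)):
        # with 100 plots per k, the k=36 farmers hold (73 / 65) * 2 ** 4 ~= 18x
        # the space of the k=32 farmers, so they should win roughly 18x as
        # many blocks, which is what the 0.01 tolerance above verifies.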
| 43.551724 | 120 | 0.65677 |
b6398cd8e5e8712abb26c5b22fbf2422ccf3bdfb | 475 | py | Python | solutions/1002.find-common-characters.229280375.ac.py | satu0king/Leetcode-Solutions | 2edff60d76c2898d912197044f6284efeeb34119 | [
"MIT"
] | 78 | 2020-10-22T11:31:53.000Z | 2022-02-22T13:27:49.000Z | solutions/1002.find-common-characters.229280375.ac.py | satu0king/Leetcode-Solutions | 2edff60d76c2898d912197044f6284efeeb34119 | [
"MIT"
] | null | null | null | solutions/1002.find-common-characters.229280375.ac.py | satu0king/Leetcode-Solutions | 2edff60d76c2898d912197044f6284efeeb34119 | [
"MIT"
] | 26 | 2020-10-23T15:10:44.000Z | 2021-11-07T16:13:50.000Z | class Solution:
def commonChars(self, A):
answer = collections.Counter(A[0])
for word in A[1:]:
temp = collections.Counter(word)
for c in answer:
if c in temp:
answer[c] = min(answer[c], temp[c])
else:
answer[c] = 0
ans = []
for k, v in answer.items():
ans.extend([k]*v)
return ans
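# Example (classic test case for this problem; relies on the collections
# import added above — LeetCode accepts the output in any order):
#   Solution().commonChars(["bella", "label", "roller"])  # -> ["e", "l", "l"]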
| 23.75 | 55 | 0.402105 |
644ecd6b1fb818d037dc235c8e94d37d07b75b9e | 3,947 | py | Python | experiments/neuron-segmentation/isbi2012/export_bioimageio_model.py | JoOkuma/torch-em | 68b723683f9013723a0e4fc8cfef1d6a2a9c9dff | [
"MIT"
] | null | null | null | experiments/neuron-segmentation/isbi2012/export_bioimageio_model.py | JoOkuma/torch-em | 68b723683f9013723a0e4fc8cfef1d6a2a9c9dff | [
"MIT"
] | null | null | null | experiments/neuron-segmentation/isbi2012/export_bioimageio_model.py | JoOkuma/torch-em | 68b723683f9013723a0e4fc8cfef1d6a2a9c9dff | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
from elf.io import open_file
from torch_em.util import (convert_to_onnx, convert_to_pytorch_script,
export_biomageio_model, get_default_citations)
def _load_data(input_, ndim):
with open_file(input_, 'r') as f:
ds = f['volumes/raw'] if 'volumes/raw' in f else f['raw']
shape = ds.shape
if ndim == 2:
s0, s1 = shape[0] - 1, shape[0]
bb = np.s_[s0:s1, :, :]
else:
assert False, "3d not supported yet"
raw = ds[bb]
return raw
def _get_name(is_aff, ndim):
name = "ISBI2012"
name += "-2D" if ndim == 2 else "-3D"
if is_aff:
name += "-AffinityModel"
else:
name += "-BoundaryModel"
return name
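# For example, _get_name(is_aff=True, ndim=2) -> "ISBI2012-2D-AffinityModel",
# while _get_name(is_aff=False, ndim=3) -> "ISBI2012-3D-BoundaryModel".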
def _get_doc(is_aff_model, ndim):
if is_aff_model:
doc = f"""
## {ndim}D U-Net for Affinity Prediction
This model was trained on the data of the ISBI2012 neuron segmentation challenge.
It predicts affinity maps that can be processed with the mutex watershed to obtain
an instance segmentation.
"""
else:
doc = f"""
## {ndim}D U-Net for Boundary Prediction
This model was trained on the data of the ISBI2012 neuron segmentation challenge.
It predicts boundary maps that can be processed with multicut segmentation to obtain
an instance segmentation.
"""
return doc
# need to wait on the spec pr to fix this.
# TODO write offsets and other mws params into the config if this is an affinity model
def export_to_bioimageio(checkpoint, input_, output, affs_to_bd, additional_formats):
ckpt_name = os.path.split(checkpoint)[1]
ndim = 3 if '3d' in ckpt_name else 2
input_data = _load_data(input_, ndim)
is_aff_model = 'affinity' in ckpt_name
    if is_aff_model and affs_to_bd:
        # the exported model converts affinities to boundaries,
        # so treat it as a boundary model from here on
        postprocessing = f'affinities_to_boundaries{ndim}d'
        is_aff_model = False
    else:
        postprocessing = None
name = _get_name(is_aff_model, ndim)
tags = ["u-net", "neuron-segmentation", "segmentation", "volume-em"]
tags += ["boundary-prediction"] if is_aff_model else ["affinity-prediction"]
# eventually we should refactor the citation logic
cite = get_default_citations()
cite["data"] = "doi.org/10.3389/fnana.2015.00142"
if ndim == 2:
cite["architecture"] = "https://link.springer.com/chapter/10.1007/978-3-319-24574-4_28"
else:
cite["architecture"] = "https://link.springer.com/chapter/10.1007/978-3-319-46723-8_49"
if is_aff_model:
cite["segmentation algorithm"] = "10.1109/TPAMI.2020.2980827"
doc = _get_doc(is_aff_model, ndim)
export_biomageio_model(
checkpoint, output, input_data,
name=name,
authors=[{"name": "Constantin Pape; @constantinpape"}],
tags=tags,
license='CC-BY-4.0',
documentation=doc,
git_repo='https://github.com/constantinpape/torch-em.git',
cite=cite,
model_postprocessing=postprocessing,
input_optional_parameters=False
)
if additional_formats:
spec_path = os.path.join(output, "model.yaml")
for add_format in additional_formats:
if add_format == "onnx":
convert_to_onnx(spec_path)
elif add_format == "torchscript":
convert_to_pytorch_script(spec_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint', required=True)
parser.add_argument('-i', '--input', required=True)
parser.add_argument('-o', '--output', required=True)
parser.add_argument('-a', '--affs_to_bd', default=0, type=int)
parser.add_argument('-f', '--additional_formats', type=str, nargs="+")
args = parser.parse_args()
export_to_bioimageio(args.checkpoint, args.input, args.output,
bool(args.affs_to_bd), args.additional_formats)
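    # Example invocation (paths are placeholders):
    #   python export_bioimageio_model.py -c checkpoints/affinity_model2d \
    #       -i isbi_raw.h5 -o exported_model -a 1 -f onnx torchscript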
| 33.449153 | 95 | 0.654674 |
44468f5382f9ba62b9bb22fe299140a42ba283f0 | 13,781 | py | Python | tests/handlers/test_user_directory.py | Kolatzek/synapse | 7ce1f97a1353e7bd9232c22a20835e40fa5662e0 | [
"Apache-2.0"
] | null | null | null | tests/handlers/test_user_directory.py | Kolatzek/synapse | 7ce1f97a1353e7bd9232c22a20835e40fa5662e0 | [
"Apache-2.0"
] | null | null | null | tests/handlers/test_user_directory.py | Kolatzek/synapse | 7ce1f97a1353e7bd9232c22a20835e40fa5662e0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
import synapse.rest.admin
from synapse.api.constants import UserTypes
from synapse.rest.client.v1 import login, room
from synapse.rest.client.v2_alpha import user_directory
from synapse.storage.roommember import ProfileInfo
from tests import unittest
class UserDirectoryTestCase(unittest.HomeserverTestCase):
"""
Tests the UserDirectoryHandler.
"""
servlets = [
login.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
config = self.default_config()
config.update_user_directory = True
return self.setup_test_homeserver(config=config)
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.handler = hs.get_user_directory_handler()
def test_handle_local_profile_change_with_support_user(self):
support_user_id = "@support:test"
self.get_success(
self.store.register(
user_id=support_user_id,
token="123",
password_hash=None,
user_type=UserTypes.SUPPORT,
)
)
self.get_success(
self.handler.handle_local_profile_change(support_user_id, None)
)
profile = self.get_success(self.store.get_user_in_directory(support_user_id))
        self.assertIsNone(profile)
display_name = 'display_name'
profile_info = ProfileInfo(avatar_url='avatar_url', display_name=display_name)
regular_user_id = '@regular:test'
self.get_success(
self.handler.handle_local_profile_change(regular_user_id, profile_info)
)
profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
        self.assertEqual(profile['display_name'], display_name)
def test_handle_user_deactivated_support_user(self):
s_user_id = "@support:test"
self.get_success(
self.store.register(
user_id=s_user_id,
token="123",
password_hash=None,
user_type=UserTypes.SUPPORT,
)
)
self.store.remove_from_user_dir = Mock()
self.store.remove_from_user_in_public_room = Mock()
self.get_success(self.handler.handle_user_deactivated(s_user_id))
        self.store.remove_from_user_dir.assert_not_called()
        self.store.remove_from_user_in_public_room.assert_not_called()
def test_handle_user_deactivated_regular_user(self):
r_user_id = "@regular:test"
self.get_success(
self.store.register(user_id=r_user_id, token="123", password_hash=None)
)
self.store.remove_from_user_dir = Mock()
self.get_success(self.handler.handle_user_deactivated(r_user_id))
        self.store.remove_from_user_dir.assert_called_once_with(r_user_id)
def test_private_room(self):
"""
A user can be searched for only by people that are either in a public
room, or that share a private chat.
"""
u1 = self.register_user("user1", "pass")
u1_token = self.login(u1, "pass")
u2 = self.register_user("user2", "pass")
u2_token = self.login(u2, "pass")
u3 = self.register_user("user3", "pass")
# We do not add users to the directory until they join a room.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 0)
room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
self.helper.join(room, user=u2, tok=u2_token)
# Check we have populated the database correctly.
shares_private = self.get_users_who_share_private_rooms()
public_users = self.get_users_in_public_rooms()
self.assertEqual(
self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
)
self.assertEqual(public_users, [])
# We get one search result when searching for user2 by user1.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 1)
# We get NO search results when searching for user2 by user3.
s = self.get_success(self.handler.search_users(u3, "user2", 10))
self.assertEqual(len(s["results"]), 0)
# We get NO search results when searching for user3 by user1.
s = self.get_success(self.handler.search_users(u1, "user3", 10))
self.assertEqual(len(s["results"]), 0)
# User 2 then leaves.
self.helper.leave(room, user=u2, tok=u2_token)
# Check we have removed the values.
shares_private = self.get_users_who_share_private_rooms()
public_users = self.get_users_in_public_rooms()
self.assertEqual(self._compress_shared(shares_private), set())
self.assertEqual(public_users, [])
# User1 now gets no search results for any of the other users.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 0)
s = self.get_success(self.handler.search_users(u1, "user3", 10))
self.assertEqual(len(s["results"]), 0)
def _compress_shared(self, shared):
"""
Compress a list of users who share rooms dicts to a list of tuples.
"""
r = set()
for i in shared:
r.add((i["user_id"], i["other_user_id"], i["room_id"]))
return r
def get_users_in_public_rooms(self):
r = self.get_success(
self.store._simple_select_list(
"users_in_public_rooms", None, ("user_id", "room_id")
)
)
retval = []
for i in r:
retval.append((i["user_id"], i["room_id"]))
return retval
def get_users_who_share_private_rooms(self):
return self.get_success(
self.store._simple_select_list(
"users_who_share_private_rooms",
None,
["user_id", "other_user_id", "room_id"],
)
)
def _add_background_updates(self):
"""
Add the background updates we need to run.
"""
# Ugh, have to reset this flag
self.store._all_done = False
self.get_success(
self.store._simple_insert(
"background_updates",
{
"update_name": "populate_user_directory_createtables",
"progress_json": "{}",
},
)
)
self.get_success(
self.store._simple_insert(
"background_updates",
{
"update_name": "populate_user_directory_process_rooms",
"progress_json": "{}",
"depends_on": "populate_user_directory_createtables",
},
)
)
self.get_success(
self.store._simple_insert(
"background_updates",
{
"update_name": "populate_user_directory_process_users",
"progress_json": "{}",
"depends_on": "populate_user_directory_process_rooms",
},
)
)
self.get_success(
self.store._simple_insert(
"background_updates",
{
"update_name": "populate_user_directory_cleanup",
"progress_json": "{}",
"depends_on": "populate_user_directory_process_users",
},
)
)
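        # The four updates above form a dependency chain enforced by the
        # `depends_on` field, so they run strictly in order:
        # createtables -> process_rooms -> process_users -> cleanup.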
def test_initial(self):
"""
The user directory's initial handler correctly updates the search tables.
"""
u1 = self.register_user("user1", "pass")
u1_token = self.login(u1, "pass")
u2 = self.register_user("user2", "pass")
u2_token = self.login(u2, "pass")
u3 = self.register_user("user3", "pass")
u3_token = self.login(u3, "pass")
room = self.helper.create_room_as(u1, is_public=True, tok=u1_token)
self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
self.helper.join(room, user=u2, tok=u2_token)
private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
self.helper.join(private_room, user=u3, tok=u3_token)
self.get_success(self.store.update_user_directory_stream_pos(None))
self.get_success(self.store.delete_all_from_user_dir())
shares_private = self.get_users_who_share_private_rooms()
public_users = self.get_users_in_public_rooms()
# Nothing updated yet
self.assertEqual(shares_private, [])
self.assertEqual(public_users, [])
# Do the initial population of the user directory via the background update
self._add_background_updates()
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
shares_private = self.get_users_who_share_private_rooms()
public_users = self.get_users_in_public_rooms()
# User 1 and User 2 are in the same public room
self.assertEqual(set(public_users), set([(u1, room), (u2, room)]))
# User 1 and User 3 share private rooms
self.assertEqual(
self._compress_shared(shares_private),
set([(u1, u3, private_room), (u3, u1, private_room)]),
)
def test_initial_share_all_users(self):
"""
        Search all users = True means that a user does not have to share a
        private room with the searching user or be in a public room to be
        visible in search results.
"""
self.handler.search_all_users = True
self.hs.config.user_directory_search_all_users = True
u1 = self.register_user("user1", "pass")
self.register_user("user2", "pass")
u3 = self.register_user("user3", "pass")
# Wipe the user dir
self.get_success(self.store.update_user_directory_stream_pos(None))
self.get_success(self.store.delete_all_from_user_dir())
# Do the initial population of the user directory via the background update
self._add_background_updates()
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
shares_private = self.get_users_who_share_private_rooms()
public_users = self.get_users_in_public_rooms()
# No users share rooms
self.assertEqual(public_users, [])
self.assertEqual(self._compress_shared(shares_private), set([]))
# Despite not sharing a room, search_all_users means we get a search
# result.
s = self.get_success(self.handler.search_users(u1, u3, 10))
self.assertEqual(len(s["results"]), 1)
# We can find the other two users
s = self.get_success(self.handler.search_users(u1, "user", 10))
self.assertEqual(len(s["results"]), 2)
# Registering a user and then searching for them works.
u4 = self.register_user("user4", "pass")
s = self.get_success(self.handler.search_users(u1, u4, 10))
self.assertEqual(len(s["results"]), 1)
class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
user_id = "@test:test"
servlets = [
user_directory.register_servlets,
room.register_servlets,
login.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
]
def make_homeserver(self, reactor, clock):
config = self.default_config()
config.update_user_directory = True
hs = self.setup_test_homeserver(config=config)
self.config = hs.config
return hs
def test_disabling_room_list(self):
self.config.user_directory_search_enabled = True
# First we create a room with another user so that user dir is non-empty
# for our user
self.helper.create_room_as(self.user_id)
u2 = self.register_user("user2", "pass")
room = self.helper.create_room_as(self.user_id)
self.helper.join(room, user=u2)
# Assert user directory is not empty
request, channel = self.make_request(
"POST", b"user_directory/search", b'{"search_term":"user2"}'
)
self.render(request)
self.assertEquals(200, channel.code, channel.result)
self.assertTrue(len(channel.json_body["results"]) > 0)
# Disable user directory and check search returns nothing
self.config.user_directory_search_enabled = False
request, channel = self.make_request(
"POST", b"user_directory/search", b'{"search_term":"user2"}'
)
self.render(request)
self.assertEquals(200, channel.code, channel.result)
self.assertTrue(len(channel.json_body["results"]) == 0)
| 37.346883 | 88 | 0.632973 |
6322de653a6125d0556b86dee007ac6dc343a7cd | 1,407 | py | Python | model/highway.py | graviraja/deep-paraphrase-generation | b09cfeee6b9410b60346d3851b3da440c2992cff | [
"MIT"
] | 5 | 2019-02-27T10:32:38.000Z | 2020-08-24T13:06:49.000Z | model/highway.py | graviraja/deep-paraphrase-generation | b09cfeee6b9410b60346d3851b3da440c2992cff | [
"MIT"
] | null | null | null | model/highway.py | graviraja/deep-paraphrase-generation | b09cfeee6b9410b60346d3851b3da440c2992cff | [
"MIT"
] | null | null | null | '''This code contains the implementation of highway network.
'''
import torch
import torch.nn as nn
class Highway(nn.Module):
def __init__(self, size, num_layers, f):
super().__init__()
self.num_layers = num_layers
self.nonlinear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
self.linear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
self.gate = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
self.f = f
def forward(self, x):
"""
Args:
x : input with shape of [batch_size, size]
Returns:
tensor with shape of [batch_size, size] after applying highway network
applies σ(x) ⨀ (f(G(x))) + (1 - σ(x)) ⨀ (Q(x)) transformation | G and Q is affine transformation,
f is non-linear transformation, σ(x) is affine transformation with sigmoid non-linearition
and ⨀ is element-wise multiplication
"""
for layer in range(self.num_layers):
# σ(x)
            gate = torch.sigmoid(self.gate[layer](x))  # F.sigmoid is deprecated
# f(G(x))
non_linear = self.f(self.nonlinear[layer](x))
# Q(x)
linear = self.linear[layer](x)
# σ(x) ⨀ (f(G(x))) + (1 - σ(x)) ⨀ (Q(x))
x = gate * non_linear + (1 - gate) * linear
return x
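

if __name__ == "__main__":
    # Minimal smoke test (illustrative sizes): a highway layer maps
    # [batch_size, size] -> [batch_size, size] without changing the shape.
    highway = Highway(size=8, num_layers=2, f=torch.relu)
    out = highway(torch.randn(4, 8))
    assert out.shape == (4, 8)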
| 32.72093 | 105 | 0.570007 |
d7c629d944781c090872517dc4b99149009096da | 15,124 | py | Python | esque/controller/topic_controller.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 29 | 2019-05-10T21:12:38.000Z | 2021-08-24T08:09:49.000Z | esque/controller/topic_controller.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 103 | 2019-05-17T07:21:41.000Z | 2021-12-02T08:29:00.000Z | esque/controller/topic_controller.py | real-digital/esque | 0b779fc308ce8bce45c1903f36c33664b2e832e7 | [
"MIT"
] | 2 | 2019-05-28T06:45:14.000Z | 2019-11-21T00:33:15.000Z | import logging
import re
import time
from contextlib import closing
from itertools import islice
from logging import Logger
from typing import TYPE_CHECKING, Dict, Iterable, List, NamedTuple, Optional, Union, cast
import confluent_kafka
import kafka
import kafka.consumer.fetcher
import pendulum
from confluent_kafka.admin import ConfigResource
from confluent_kafka.cimpl import OFFSET_END, KafkaException, NewTopic, TopicPartition
from esque.config import ESQUE_GROUP_ID, Config
from esque.errors import TopicDeletionException, TopicDoesNotExistException
from esque.helpers import ensure_kafka_future_done
from esque.resources.topic import Partition, Topic, TopicDiff
logger: Logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from esque.cluster import Cluster
class OffsetWithTimestamp(NamedTuple):
topic: str
partition: int
offset: int
timestamp_ms: Optional[int]
class TopicController:
def __init__(self, cluster: "Cluster", config: Optional[Config] = None):
self.cluster: "Cluster" = cluster
if config is None:
config = Config.get_instance()
self.config = config
def list_topics(
self,
*,
search_string: str = None,
sort: bool = True,
hide_internal: bool = False,
get_topic_objects: bool = True,
get_partitions: bool = True,
) -> List[Topic]:
topic_results = self.cluster.confluent_client.list_topics().topics.values()
topic_names = [t.topic for t in topic_results]
if search_string:
topic_names = [topic for topic in topic_names if re.match(search_string, topic)]
if hide_internal:
topic_names = [topic for topic in topic_names if not topic.startswith("__")]
if sort:
topic_names = sorted(topic_names)
if get_topic_objects:
topics = [
self.get_cluster_topic(topic_name, retrieve_partition_watermarks=get_partitions)
for topic_name in topic_names
]
else:
topics = list(map(self.get_local_topic, topic_names))
return topics
def create_topics(self, topics: List[Topic]):
for topic in topics:
partitions = (
topic.num_partitions if topic.num_partitions is not None else self.config.default_num_partitions
)
replicas = (
topic.replication_factor
if topic.replication_factor is not None
else self.config.default_replication_factor
)
new_topic = NewTopic(
topic.name, num_partitions=partitions, replication_factor=replicas, config=topic.config
)
future_list = self.cluster.confluent_client.create_topics([new_topic], operation_timeout=60)
ensure_kafka_future_done(next(islice(future_list.values(), 1)))
for _ in range(80):
topic_data = self.cluster.confluent_client.list_topics(topic=topic.name).topics[topic.name]
if topic_data.error is None:
break
time.sleep(0.125)
else:
raise RuntimeError(f"Couldn't create topic {topic}")
def alter_configs(self, topics: List[Topic]):
for topic in topics:
altered_config = self._get_altered_config(topic)
config_resource = ConfigResource(ConfigResource.Type.TOPIC, topic.name, altered_config)
future_list = self.cluster.confluent_client.alter_configs([config_resource])
ensure_kafka_future_done(next(islice(future_list.values(), 1)))
def _get_altered_config(self, topic: Topic) -> Dict[str, str]:
cluster_topic = self.get_cluster_topic(topic.name)
current_config = cluster_topic.config.items()
altered_config = {}
for name, value in current_config:
if name in topic.config:
altered_config[name] = topic.config[name]
continue
altered_config[name] = value
return altered_config
def delete_topic(self, topic: Topic) -> bool:
return self.delete_topics([topic])
def delete_topics(self, topics: List[Topic]) -> bool:
futures = self.cluster.confluent_client.delete_topics([topic.name for topic in topics], operation_timeout=60)
errors: List[str] = []
for topic_name, future in futures.items():
try:
future.result()
except KafkaException as e:
errors.append(f"[{topic_name}]: {e.args[0].str()}")
if errors:
raise TopicDeletionException("The following exceptions occurred:\n " + "\n ".join(sorted(errors)))
return True
def topic_exists(self, topic_name: str) -> bool:
try:
self.get_cluster_topic(topic_name, retrieve_last_timestamp=False, retrieve_partition_watermarks=False)
except TopicDoesNotExistException:
return False
return True
def get_cluster_topic(
self, topic_name: str, *, retrieve_last_timestamp: bool = False, retrieve_partition_watermarks: bool = True
) -> Topic:
"""Convenience function getting an existing topic based on topic_name"""
return self.update_from_cluster(
Topic(topic_name),
retrieve_last_timestamp=retrieve_last_timestamp,
retrieve_partition_watermarks=retrieve_partition_watermarks,
)
def get_local_topic(self, topic_name: str) -> Topic:
return Topic(topic_name)
def get_timestamp_of_closest_offset(
self, topic_name: str, offset: Union[int, str]
) -> Dict[int, OffsetWithTimestamp]:
"""
Gets the timestamp for the message(s) at or right after `offset` in topic `topic_name`.
The timestamps given in the result are the actual timestamp of the offset that was found.
        If there is no message at or after the given `offset`, the resulting offset will be `-1`, i.e. the end of the
        topic partition _not including_ the last message.
        If the topic has multiple partitions, the `offset` will be used for every partition.
If there is no message at `offset`, the next available offset will be used.
If there is no message at all after `offset`, offset will be `-1` and timestamp will be `None` in the output for the
corresponding partition.
In the Kafka world, -1 corresponds to the position after the last known message i.e. the end of the topic partition
_not including_ the last message in the partition.
:param topic_name: The topic to get the offsets for.
:param offset: The offset to find timestamps for.
:return: Dict: partition id -> offset with timestamp.
"""
messages_received = self._read_one_message_per_partition(topic_name, offset)
data: Dict[int, OffsetWithTimestamp] = {}
for partition_id, msg in messages_received.items():
if msg is None:
data[partition_id] = OffsetWithTimestamp(
topic=topic_name, partition=partition_id, offset=OFFSET_END, timestamp_ms=None
)
else:
data[partition_id] = OffsetWithTimestamp(
topic=topic_name, partition=partition_id, offset=msg.offset, timestamp_ms=msg.timestamp
)
return data
def _read_one_message_per_partition(self, topic_name: str, offset: Union[str, int]):
config = self.config.create_kafka_python_config()
with closing(kafka.KafkaConsumer(**config)) as consumer:
topic_partitions = [
kafka.TopicPartition(topic=topic_name, partition=partition)
for partition in consumer.partitions_for_topic(topic_name)
]
partition_ends: Dict[kafka.TopicPartition, int] = consumer.end_offsets(topic_partitions)
partition_starts: Dict[kafka.TopicPartition, int] = consumer.beginning_offsets(topic_partitions)
if offset == "first":
partition_offsets = (partition_starts[tp] for tp in topic_partitions)
elif offset == "last":
partition_offsets = (partition_ends[tp] - 1 for tp in topic_partitions)
else:
partition_offsets = (max(offset, partition_starts[tp]) for tp in topic_partitions)
assignments = [
(tp, offset) for tp, offset in zip(topic_partitions, partition_offsets) if partition_ends[tp] > offset
]
consumer.assign([tp for tp, _ in assignments])
for tp, offset in assignments:
consumer.seek(tp, offset)
unassigned_partitions = [tp for tp in topic_partitions if tp not in consumer.assignment()]
messages_received: Dict[int, Optional[kafka.consumer.fetcher.ConsumerRecord]] = {
tp.partition: None for tp in unassigned_partitions
}
for message in cast(Iterable[kafka.consumer.fetcher.ConsumerRecord], consumer):
if message.partition not in messages_received:
messages_received[message.partition] = message
consumer.pause(kafka.TopicPartition(message.topic, message.partition))
if len(messages_received) == len(topic_partitions):
# we have one record for every partition, so we're done.
break
return messages_received
def get_offsets_closest_to_timestamp(
self, topic_name: str, timestamp: pendulum.DateTime
) -> Dict[int, OffsetWithTimestamp]:
"""
Gets the offsets of the message(s) in `topic_name` whose timestamps are at, or right after, the given `timestamp`.
The timestamps given in the result are the actual timestamp of the offset that was found.
If there is no message at or after the given `timestamp`, the resulting offset will be `-1` i.e. end of topic
partition _not including_ the last message.
:param topic_name: The topic to get the offsets for.
:param timestamp: The timestamp to find offsets for.
:return: Dict: partition id -> offset with timestamp.
"""
config = self.config.create_kafka_python_config()
with closing(kafka.KafkaConsumer(**config)) as consumer:
topic_partitions = [
kafka.TopicPartition(topic=topic_name, partition=partition)
for partition in consumer.partitions_for_topic(topic_name)
]
timestamp_ms = int(timestamp.timestamp() * 1000)
offsets: Dict[kafka.TopicPartition, kafka.structs.OffsetAndTimestamp] = consumer.offsets_for_times(
{tp: timestamp_ms for tp in topic_partitions}
)
data: Dict[int, OffsetWithTimestamp] = {}
for tp, offset_data in offsets.items():
if offset_data is None:
data[tp.partition] = OffsetWithTimestamp(
topic=tp.topic, partition=tp.partition, offset=OFFSET_END, timestamp_ms=None
)
else:
data[tp.partition] = OffsetWithTimestamp(
topic=tp.topic,
partition=tp.partition,
offset=offset_data.offset,
timestamp_ms=offset_data.timestamp,
)
return data
def update_from_cluster(
self, topic: Topic, *, retrieve_last_timestamp: bool = False, retrieve_partition_watermarks: bool = True
) -> Topic:
"""Takes a topic and, based on its name, updates all attributes from the cluster"""
topic.partition_data = self._get_partitions(
topic, retrieve_last_timestamp, get_partition_watermarks=retrieve_partition_watermarks
)
topic.config = self.cluster.retrieve_config(ConfigResource.Type.TOPIC, topic.name)
topic.is_only_local = False
return topic
def _get_partitions(
self, topic: Topic, retrieve_last_timestamp: bool, get_partition_watermarks: bool = True
) -> List[Partition]:
assert not (
retrieve_last_timestamp and not get_partition_watermarks
), "Can not retrieve timestamp without partition watermarks"
config = Config.get_instance().create_confluent_config()
config.update({"group.id": ESQUE_GROUP_ID, "topic.metadata.refresh.interval.ms": "250"})
with closing(confluent_kafka.Consumer(config)) as consumer:
confluent_topic = consumer.list_topics(topic=topic.name).topics[topic.name]
partitions: List[Partition] = []
if not get_partition_watermarks:
return [
Partition(partition_id, -1, -1, meta.isrs, meta.leader, meta.replicas, None)
for partition_id, meta in confluent_topic.partitions.items()
]
for partition_id, meta in confluent_topic.partitions.items():
try:
low, high = consumer.get_watermark_offsets(
TopicPartition(topic=topic.name, partition=partition_id)
)
except KafkaException:
# retry after metadata should be refreshed (also consider small network delays)
# unfortunately we cannot explicitly cause and wait for a metadata refresh
time.sleep(1)
low, high = consumer.get_watermark_offsets(
TopicPartition(topic=topic.name, partition=partition_id)
)
latest_timestamp = None
if high > low and retrieve_last_timestamp:
assignment = [TopicPartition(topic=topic.name, partition=partition_id, offset=high - 1)]
consumer.assign(assignment)
msg = consumer.poll(timeout=10)
if msg is None:
logger.warning(
f"Due to timeout latest timestamp for topic `{topic.name}` "
f"and partition `{partition_id}` is missing."
)
else:
latest_timestamp = float(msg.timestamp()[1]) / 1000
partition = Partition(partition_id, low, high, meta.isrs, meta.leader, meta.replicas, latest_timestamp)
partitions.append(partition)
return partitions
def diff_with_cluster(self, local_topic: Topic) -> TopicDiff:
assert local_topic.is_only_local, "Can only diff local topics with remote"
cluster_topic = self.get_cluster_topic(local_topic.name, retrieve_partition_watermarks=False)
diffs = TopicDiff()
diffs.set_diff("num_partitions", cluster_topic.num_partitions, local_topic.num_partitions)
diffs.set_diff("replication_factor", cluster_topic.replication_factor, local_topic.replication_factor)
for name, old_value in cluster_topic.config.items():
diffs.set_diff(name, old_value, local_topic.config.get(name))
return diffs
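
# Illustrative usage sketch (not part of the original module). How a `Cluster`
# instance is constructed depends on the surrounding esque code base, so the
# calls below are assumptions for demonstration purposes only:
#
#   from esque.cluster import Cluster
#
#   cluster = Cluster()
#   controller = TopicController(cluster)
#   controller.create_topics([Topic("example-topic")])
#   print([t.name for t in controller.list_topics(search_string="example-.*")])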
| 45.969605 | 124 | 0.640373 |
0b8d31f85cf3eccca35e733072d2c4931fc02bf5 | 92,574 | py | Python | virtual/Lib/tarfile.py | JamesKimari/pitch-one | aac9007716bf2e3b6446588a06508fac068f3d20 | [
"MIT"
] | 15 | 2015-04-14T00:33:13.000Z | 2021-10-18T01:08:54.000Z | virtual/Lib/tarfile.py | JamesKimari/pitch-one | aac9007716bf2e3b6446588a06508fac068f3d20 | [
"MIT"
] | 1 | 2018-02-13T10:12:15.000Z | 2018-02-13T10:12:15.000Z | virtual/Lib/tarfile.py | JamesKimari/pitch-one | aac9007716bf2e3b6446588a06508fac068f3d20 | [
"MIT"
] | 3 | 2015-04-23T11:12:32.000Z | 2021-10-18T01:08:55.000Z | #!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
from builtins import open as bltn_open
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# OSError (winerror=1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (OSError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError",
"CompressionError", "StreamError", "ExtractError", "HeaderError",
"ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT",
"DEFAULT_FORMAT", "open"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name == "nt":
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
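
# For example: stn("foo", 5, "utf-8", "strict") returns b"foo\x00\x00", and
# nts(b"foo\x00\x00", "utf-8", "strict") returns "foo" again.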
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
s = nts(s, "ascii", "strict")
n = int(s.strip() or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 or 0o377 byte indicate this
# particular encoding, the following digits-1 bytes are a big-endian
# base-256 representation. This allows values up to (256**(digits-1))-1.
# A 0o200 byte indicates a positive number, a 0o377 byte a negative
# number.
if 0 <= n < 8 ** (digits - 1):
s = bytes("%0*o" % (digits - 1, int(n)), "ascii") + NUL
elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
if n >= 0:
s = bytearray([0o200])
else:
s = bytearray([0o377])
n = 256 ** digits + n
for i in range(digits - 1):
s.insert(1, n & 0o377)
n >>= 8
else:
raise ValueError("overflow in number field")
return s
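
# For example: itn(0o644) returns b"0000644\x00" (a 7-digit octal field plus a
# null byte), and nti(b"0000644\x00") returns 420 (== 0o644). Values outside
# the octal range use the base-256 encoding described above when format is
# GNU_FORMAT.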
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
bufsize = bufsize or 16 * 1024
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst, bufsize)
return
blocks, remainder = divmod(length, bufsize)
for b in range(blocks):
buf = src.read(bufsize)
if len(buf) < bufsize:
raise exception("unexpected end of data")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise exception("unexpected end of data")
dst.write(buf)
return
def filemode(mode):
"""Deprecated in this location; use stat.filemode."""
import warnings
warnings.warn("deprecated in favor of stat.filemode",
DeprecationWarning, 2)
return stat.filemode(mode)
def _safe_print(s):
encoding = getattr(sys.stdout, 'encoding', None)
if encoding is not None:
s = s.encode(encoding, 'backslashreplace').decode(encoding)
print(s, end=' ')
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
    blockwise. Use of gzip, bzip2 or lzma compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
self.exception = zlib.error
else:
self._init_write_gz()
elif comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
self.exception = OSError
else:
self.cmp = bz2.BZ2Compressor()
elif comptype == "xz":
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = lzma.LZMADecompressor()
self.exception = lzma.LZMAError
else:
self.cmp = lzma.LZMACompressor()
elif comptype != "tar":
raise CompressionError("unknown compression type %r" % comptype)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
self.closed = True
try:
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
self.fileobj.write(struct.pack("<L", self.crc))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
finally:
if not self._extfileobj:
self.fileobj.close()
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except self.exception:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\x1f\x8b\x08"):
return "gz"
elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
return "bz2"
elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
return "xz"
else:
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
b = self.fileobj.read(length)
if len(b) != length:
raise ReadError("unexpected end of data")
buf += b
else:
buf += NUL * length
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
@property
def path(self):
return self.name
@path.setter
def path(self, name):
self.name = name
@property
def linkpath(self):
return self.linkname
@linkpath.setter
def linkpath(self, linkname):
self.linkname = linkname
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors)
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
def _posix_split_name(self, name, encoding, errors):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
components = name.split("/")
for i in range(1, len(components)):
prefix = "/".join(components[:i])
name = "/".join(components[i:])
if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \
len(name.encode(encoding, errors)) <= LENGTH_NAME:
break
else:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf-8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf-8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf-8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
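            # The length field counts its own digits, so iterate to a fixed
            # point: recompute the total record length until adding the width
            # of the length field no longer changes it. For example, the pair
            # ("path", "foo") serializes to b"12 path=foo\n".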
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf-8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
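        # For example: b"22 mtime=1585318130.5\n" (illustrative values).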
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
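
# For example (illustrative round trip): a header block for a regular file can
# be built with tobuf() and parsed back with frombuf():
#
#   info = TarInfo("example.txt")
#   info.size = 42
#   buf = info.tobuf(format=GNU_FORMAT)
#   assert TarInfo.frombuf(buf, ENCODING, "surrogateescape").name == "example.txt"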
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None,
errorlevel=None, copybufsize=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"}
if mode not in modes:
raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
self.mode = mode
self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if (name is None and hasattr(fileobj, "name") and
isinstance(fileobj.name, (str, bytes))):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.copybufsize = copybufsize
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in ("a", "w", "x"):
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
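    #
    # For example (illustrative sketch only, not a real sub-constructor): a
    # subclass could register an additional compression scheme like this:
    #
    #   class MyTarFile(TarFile):
    #       @classmethod
    #       def zstopen(cls, name, mode="r", fileobj=None, **kwargs):
    #           ...  # wrap fileobj in a zstandard stream, then call cls.taropen()
    #
    #   MyTarFile.OPEN_METH = {**TarFile.OPEN_METH, "zst": "zstopen"}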
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'r:xz' open for reading with lzma compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'w:xz' open for writing with lzma compression
'x' or 'x:' create a tarfile exclusively without compression, raise
an exception if the file is already created
'x:gz' create a gzip compressed tarfile, raise an exception
if the file is already created
'x:bz2' create a bzip2 compressed tarfile, raise an exception
if the file is already created
'x:xz' create an lzma compressed tarfile, raise an exception
if the file is already created
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'r|xz' open an lzma compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
'w|xz' open an lzma compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
def not_compressed(comptype):
return cls.OPEN_METH[comptype] == 'taropen'
for comptype in sorted(cls.OPEN_METH, key=not_compressed):
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError):
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in ("a", "w", "x"):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if mode not in ("r", "a", "w", "x"):
raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
except OSError:
if fileobj is not None and mode == 'r':
raise ReadError("not a gzip file")
raise
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except OSError:
fileobj.close()
if mode == 'r':
raise ReadError("not a gzip file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
fileobj = bz2.BZ2File(fileobj or name, mode,
compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (OSError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not a bzip2 file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
"""Open lzma compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (lzma.LZMAError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not an lzma file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open", # bzip2 compressed tar
"xz": "xzopen" # lzma compressed tar
}
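    # Sketch of the sub-constructor registration described above: a subclass
    # can define its own *open() classmethod and expose it through OPEN_METH.
    # The "plain" name below is purely illustrative; it just delegates to
    # taropen() to show the wiring:
    #
    #   class MyTarFile(TarFile):
    #       @classmethod
    #       def plainopen(cls, name, mode="r", fileobj=None, **kwargs):
    #           # no extra compression layer; delegate to the super-constructor
    #           return cls.taropen(name, mode, fileobj, **kwargs)
    #       OPEN_METH = {**TarFile.OPEN_METH, "plain": "plainopen"}
    #
    #   MyTarFile.open("archive.tar", "r:plain")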
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
self.closed = True
try:
if self.mode in ("a", "w", "x"):
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
finally:
if not self._extfileobj:
self.fileobj.close()
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
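    # Sketch of the three member accessors above ("sample.tar" is a
    # placeholder file name):
    #
    #   with tarfile.open("sample.tar") as tf:
    #       members = tf.getmembers()            # TarInfo objects, archive order
    #       first = tf.getmember(members[0].name)
    #       print(tf.getnames())                 # just the member names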
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object from the result of os.stat or equivalent
on an existing file. The file is either named by `name', or
specified as a file object `fileobj' with a file descriptor. If
given, `arcname' specifies an alternative name for the file in the
archive, otherwise, the name is taken from the 'name' attribute of
'fileobj', or the 'name' argument. The name should be a text
string.
"""
self._check("awx")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes;
        # absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
        # information specific to the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self # Not needed
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
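    # Sketch: gettarinfo() pairs with addfile() when a file should be stored
    # under a different archive name ("data.bin" is a placeholder):
    #
    #   with tarfile.open("out.tar", "w") as tf:
    #       ti = tf.gettarinfo("data.bin", arcname="renamed/data.bin")
    #       with open("data.bin", "rb") as f:
    #           tf.addfile(ti, f)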
def list(self, verbose=True, *, members=None):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced. `members' is optional and must be a subset of the
list returned by getmembers().
"""
self._check()
if members is None:
members = self
for tarinfo in members:
if verbose:
_safe_print(stat.filemode(tarinfo.mode))
_safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid))
if tarinfo.ischr() or tarinfo.isblk():
_safe_print("%10s" %
("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
else:
_safe_print("%10d" % tarinfo.size)
_safe_print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6])
_safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
if verbose:
if tarinfo.issym():
_safe_print("-> " + tarinfo.linkname)
if tarinfo.islnk():
_safe_print("link to " + tarinfo.linkname)
print()
def add(self, name, arcname=None, recursive=True, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `filter' is a function
that expects a TarInfo object argument and returns the changed
        TarInfo object; if it returns None, the TarInfo object will be
        excluded from the archive.
"""
self._check("awx")
if arcname is None:
arcname = name
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, filter=filter)
else:
self.addfile(tarinfo)
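    # Sketch: excluding members via the `filter' callback documented above;
    # returning None drops the entry (the ".log" rule is just an example):
    #
    #   def drop_logs(tarinfo):
    #       return None if tarinfo.name.endswith(".log") else tarinfo
    #
    #   with tarfile.open("out.tar", "w") as tf:
    #       tf.add("project", filter=drop_logs)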
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, it should be a binary file, and tarinfo.size bytes are read
from it and added to the archive. You can create TarInfo objects
directly, or by using gettarinfo().
"""
self._check("awx")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
        bufsize = self.copybufsize
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
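    # Sketch: addfile() with a hand-built TarInfo archives in-memory data
    # without touching the filesystem:
    #
    #   import io
    #   payload = b"hello"
    #   ti = tarfile.TarInfo(name="greeting.txt")
    #   ti.size = len(payload)
    #   with tarfile.open("out.tar", "w") as tf:
    #       tf.addfile(ti, io.BytesIO(payload))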
def extractall(self, path=".", members=None, *, numeric_owner=False):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers(). If `numeric_owner` is True, only
the numbers for user/group names are used and not the names.
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir(),
numeric_owner=numeric_owner)
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath, numeric_owner=numeric_owner)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True, *, numeric_owner=False):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False. If `numeric_owner`
is True, only the numbers for user/group names are used and not
the names.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs,
numeric_owner=numeric_owner)
except OSError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file or a
link, an io.BufferedReader object is returned. Otherwise, None is
returned.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
# Members with unknown types are treated as regular files.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
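    # Sketch: reading a member's bytes without writing anything to disk
    # ("docs/readme.txt" is a placeholder member name):
    #
    #   with tarfile.open("sample.tar") as tf:
    #       f = tf.extractfile("docs/readme.txt")
    #       if f is not None:                # None for directories, devices, ...
    #           data = f.read()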
def _extract_member(self, tarinfo, targetpath, set_attrs=True,
numeric_owner=False):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
        # and build the destination pathname, replacing
        # forward slashes with platform-specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath, numeric_owner)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except FileExistsError:
pass
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
bufsize = self.copybufsize
with bltn_open(targetpath, "wb") as target:
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size, ReadError, bufsize)
target.seek(tarinfo.size)
target.truncate()
else:
copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath, numeric_owner):
"""Set owner of targetpath according to tarinfo. If numeric_owner
is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
is False, fall back to .gid/.uid when the search based on name
fails.
"""
if hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
g = tarinfo.gid
u = tarinfo.uid
if not numeric_owner:
try:
if grp:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
pass
try:
if pwd:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
pass
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
os.chown(targetpath, u, g)
except OSError:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except OSError:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except OSError:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
        TarFile is opened for reading. Return None if there are no more
        members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Advance the file pointer.
if self.offset != self.fileobj.tell():
self.fileobj.seek(self.offset - 1)
if not self.fileobj.read(1):
raise ReadError("unexpected end of data")
# Read the next block.
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise OSError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise OSError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
yield from self.members
return
# Yield items using TarFile's next() method.
# When all members have been read, set TarFile as _loaded.
index = 0
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will have already exhausted the next() method.
if self.firstmember is not None:
tarinfo = self.next()
index += 1
yield tarinfo
while True:
if index < len(self.members):
tarinfo = self.members[index]
elif not self._loaded:
tarinfo = self.next()
if not tarinfo:
self._loaded = True
return
else:
return
index += 1
yield tarinfo
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
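# Sketch: is_tarfile() just attempts to open the path and reports success
# ("maybe.tar" is a placeholder file name):
#
#   if is_tarfile("maybe.tar"):
#       with TarFile.open("maybe.tar") as tf:
#           tf.list()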
open = TarFile.open  # module-level alias; the builtin open() stays reachable in this module as bltn_open
def main():
import argparse
description = 'A simple command-line interface for tarfile module.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Verbose output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--list', metavar='<tarfile>',
help='Show listing of a tarfile')
group.add_argument('-e', '--extract', nargs='+',
metavar=('<tarfile>', '<output_dir>'),
help='Extract tarfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create tarfile from sources')
group.add_argument('-t', '--test', metavar='<tarfile>',
help='Test if a tarfile is valid')
args = parser.parse_args()
if args.test is not None:
src = args.test
if is_tarfile(src):
with open(src, 'r') as tar:
tar.getmembers()
print(tar.getmembers(), file=sys.stderr)
if args.verbose:
print('{!r} is a tar archive.'.format(src))
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.list is not None:
src = args.list
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.list(verbose=args.verbose)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.extract is not None:
if len(args.extract) == 1:
src = args.extract[0]
curdir = os.curdir
elif len(args.extract) == 2:
src, curdir = args.extract
else:
parser.exit(1, parser.format_help())
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.extractall(path=curdir)
if args.verbose:
if curdir == '.':
msg = '{!r} file is extracted.'.format(src)
else:
msg = ('{!r} file is extracted '
'into {!r} directory.').format(src, curdir)
print(msg)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.create is not None:
tar_name = args.create.pop(0)
_, ext = os.path.splitext(tar_name)
compressions = {
# gz
'.gz': 'gz',
'.tgz': 'gz',
# xz
'.xz': 'xz',
'.txz': 'xz',
# bz2
'.bz2': 'bz2',
'.tbz': 'bz2',
'.tbz2': 'bz2',
'.tb2': 'bz2',
}
tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
tar_files = args.create
with TarFile.open(tar_name, tar_mode) as tf:
for file_name in tar_files:
tf.add(file_name)
if args.verbose:
print('{!r} file created.'.format(tar_name))
if __name__ == '__main__':
main()
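# Example invocations of the CLI defined above (sketch; the file names are
# placeholders):
#
#   python -m tarfile -l archive.tar            # show listing
#   python -m tarfile -e archive.tar out_dir    # extract into out_dir
#   python -m tarfile -c files.tgz a.txt b.txt  # create; compression from extension
#   python -m tarfile -t archive.tar -v         # test validity, verbose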
| 36.489555 | 103 | 0.546795 |
ff986b304087327313ddd1a1b8b033c610b642fb | 442 | py | Python | desafio/desafio063.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio063.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio063.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | # Write a program that reads any integer N and shows on screen the first N elements of the Fibonacci sequence
tt = 0
t = int(input('Enter the number of terms of the Fibonacci sequence: '))
n1 = 0
n2 = 1
print(f'\n{n1} -> ', end='')
# 'tt < t - 1' (instead of '!=') keeps non-positive t from looping forever
while tt < t - 1:
    n = n1 + n2
    n2 = n1
    n1 = n
    if tt <= t - 3:
        print(n, end=' -> ')
    elif tt == t - 2:
        print(f'{n} -> End')
    tt += 1
| 21.047619 | 131 | 0.540724 |
13cd85bc0bba7738200de05fddc089da68e22901 | 958 | py | Python | app/index.py | spakov/irbox_app | a2b3af9629681c97c19a95019fffaafaaa515c26 | [
"Unlicense"
] | null | null | null | app/index.py | spakov/irbox_app | a2b3af9629681c97c19a95019fffaafaaa515c26 | [
"Unlicense"
] | null | null | null | app/index.py | spakov/irbox_app | a2b3af9629681c97c19a95019fffaafaaa515c26 | [
"Unlicense"
] | null | null | null | """
Index endpoint.
"""
from flask import current_app
from flask import Blueprint
from flask import render_template
from app.include import IncludeType
from app.include import remote_include
index_blueprint = Blueprint('index_blueprint', __name__)
@index_blueprint.route('/')
def index():
"""
Return index page (remote).
"""
# Start with no remotes
remotes = {}
# Loop through each remote
for remote_id, remote_name in current_app.config['REMOTES'].items():
# Get remote URL and image
remote_url = remote_include(remote_id, IncludeType.URL)
remote_image = remote_include(remote_id, IncludeType.IMAGE)
# Build a dictionary of remotes with keys of remote ID and values of a
# tuple of remote name, remote_url, and remote image
remotes[remote_id] = (remote_name, remote_url, remote_image)
return render_template(
'index.html',
remotes=remotes
)
| 25.891892 | 78 | 0.689979 |
82b6864299337134a8e45d2a1a639fe7bdc498f5 | 35 | py | Python | btclib/tests/__init__.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | 40 | 2020-04-07T12:10:34.000Z | 2022-03-18T18:10:48.000Z | btclib/tests/__init__.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | 30 | 2020-03-20T00:52:37.000Z | 2022-02-10T12:54:30.000Z | btclib/tests/__init__.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | 16 | 2020-03-19T15:33:35.000Z | 2022-02-24T21:47:39.000Z | """btclib non-regression tests."""
| 17.5 | 34 | 0.685714 |
3ea79c42038faaedf5a66e7c71fa0f312c4f2630 | 64,518 | py | Python | esrally/chart_generator.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 1,577 | 2016-04-19T12:38:58.000Z | 2022-03-31T07:18:25.000Z | esrally/chart_generator.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 1,079 | 2016-04-19T12:09:16.000Z | 2022-03-31T05:38:50.000Z | esrally/chart_generator.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 300 | 2016-04-19T18:27:12.000Z | 2022-03-23T07:54:16.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import glob
import json
import logging
import uuid
from esrally import config, exceptions, track
from esrally.utils import console, io
color_scheme_rgba = [
# #00BFB3
"rgba(0,191,179,1)",
# #00A9E0
"rgba(0,169,224,1)",
# #F04E98
"rgba(240,78,152,1)",
# #FFCD00
"rgba(255,205,0,1)",
# #0076A8
"rgba(0,118,168,1)",
# #93C90E
"rgba(147,201,14,1)",
# #646464
"rgba(100,100,100,1)",
]
def index_label(race_config):
if race_config.label:
return race_config.label
label = "%s-%s" % (race_config.challenge, race_config.car)
if race_config.plugins:
label += "-%s" % race_config.plugins.replace(":", "-").replace(",", "+")
if race_config.node_count > 1:
label += " (%d nodes)" % race_config.node_count
return label
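# A quick sketch of the labels index_label() produces. The object below is a
# hypothetical stand-in; real callers pass Rally race-config instances:
#
#   from types import SimpleNamespace
#   rc = SimpleNamespace(label=None, challenge="append-no-conflicts",
#                        car="defaults", plugins="x-pack:security", node_count=3)
#   index_label(rc)  # -> "append-no-conflicts-defaults-x-pack-security (3 nodes)"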
class BarCharts:
UI_STATE_JSON = json.dumps({"vis": {"colors": dict(zip(["bare", "docker", "ear"], color_scheme_rgba))}})
@staticmethod
# flavor's unused but we need the same signature used by the corresponding method in TimeSeriesCharts
def format_title(environment, track_name, flavor=None, es_license=None, suffix=None):
title = f"{environment}-{track_name}"
if suffix:
title += f"-{suffix}"
return title
@staticmethod
def filter_string(environment, race_config):
if race_config.name:
return f'environment:"{environment}" AND active:true AND user-tags.name:"{race_config.name}"'
else:
return (
f'environment:"{environment}" AND active:true AND track:"{race_config.track}"'
f' AND challenge:"{race_config.challenge}" AND car:"{race_config.car}" AND node-count:{race_config.node_count}'
)
@staticmethod
def gc(title, environment, race_config):
vis_state = {
"title": title,
"type": "histogram",
"params": {
"addLegend": True,
"addTimeMarker": False,
"addTooltip": True,
"categoryAxes": [
{
"id": "CategoryAxis-1",
"labels": {"show": True, "truncate": 100},
"position": "bottom",
"scale": {"type": "linear"},
"show": True,
"style": {},
"title": {"text": "filters"},
"type": "category",
}
],
"defaultYExtents": False,
"drawLinesBetweenPoints": True,
"grid": {"categoryLines": False, "style": {"color": "#eee"}},
"interpolate": "linear",
"legendPosition": "right",
"radiusRatio": 9,
"scale": "linear",
"seriesParams": [
{
"data": {"id": "1", "label": "Total GC Duration [ms]"},
"drawLinesBetweenPoints": True,
"mode": "normal",
"show": "True",
"showCircles": True,
"type": "histogram",
"valueAxis": "ValueAxis-1",
}
],
"setYExtents": False,
"showCircles": True,
"times": [],
"valueAxes": [
{
"id": "ValueAxis-1",
"labels": {"filter": False, "rotate": 0, "show": True, "truncate": 100},
"name": "LeftAxis-1",
"position": "left",
"scale": {"mode": "normal", "type": "linear"},
"show": True,
"style": {},
"title": {"text": "Total GC Duration [ms]"},
"type": "value",
}
],
},
"aggs": [
{
"id": "1",
"enabled": True,
"type": "median",
"schema": "metric",
"params": {"field": "value.single", "percents": [50], "customLabel": "Total GC Duration [ms]"},
},
{
"id": "2",
"enabled": True,
"type": "filters",
"schema": "segment",
"params": {
"filters": [
{
"input": {"query": {"query_string": {"query": "name:young_gc_time", "analyze_wildcard": True}}},
"label": "Young GC",
},
{
"input": {"query": {"query_string": {"query": "name:old_gc_time", "analyze_wildcard": True}}},
"label": "Old GC",
},
]
},
},
{
"id": "3",
"enabled": True,
"type": "terms",
"schema": "split",
"params": {"field": "distribution-version", "size": 10, "order": "asc", "orderBy": "_term", "row": False},
},
{
"id": "4",
"enabled": True,
"type": "terms",
"schema": "group",
"params": {"field": "user-tags.setup", "size": 5, "order": "desc", "orderBy": "_term"},
},
],
"listeners": {},
}
search_source = {
"index": "rally-results-*",
"query": {"query_string": {"query": BarCharts.filter_string(environment, race_config), "analyze_wildcard": True}},
"filter": [],
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": BarCharts.UI_STATE_JSON,
"description": "gc",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": json.dumps(search_source)},
},
}
@staticmethod
def io(title, environment, race_config):
vis_state = {
"title": title,
"type": "histogram",
"params": {
"addLegend": True,
"addTimeMarker": False,
"addTooltip": True,
"categoryAxes": [
{
"id": "CategoryAxis-1",
"labels": {"show": True, "truncate": 100},
"position": "bottom",
"scale": {"type": "linear"},
"show": True,
"style": {},
"title": {"text": "filters"},
"type": "category",
}
],
"defaultYExtents": False,
"drawLinesBetweenPoints": True,
"grid": {"categoryLines": False, "style": {"color": "#eee"}},
"interpolate": "linear",
"legendPosition": "right",
"radiusRatio": 9,
"scale": "linear",
"seriesParams": [
{
"data": {"id": "1", "label": "[Bytes]"},
"drawLinesBetweenPoints": True,
"mode": "normal",
"show": "True",
"showCircles": True,
"type": "histogram",
"valueAxis": "ValueAxis-1",
}
],
"setYExtents": False,
"showCircles": True,
"times": [],
"valueAxes": [
{
"id": "ValueAxis-1",
"labels": {"filter": False, "rotate": 0, "show": True, "truncate": 100},
"name": "LeftAxis-1",
"position": "left",
"scale": {"mode": "normal", "type": "linear"},
"show": True,
"style": {},
"title": {"text": "[Bytes]"},
"type": "value",
}
],
},
"aggs": [
{
"id": "1",
"enabled": True,
"type": "sum",
"schema": "metric",
"params": {"field": "value.single", "customLabel": "[Bytes]"},
},
{
"id": "2",
"enabled": True,
"type": "filters",
"schema": "segment",
"params": {
"filters": [
{
"input": {"query": {"query_string": {"analyze_wildcard": True, "query": "name:index_size"}}},
"label": "Index size",
},
{
"input": {"query": {"query_string": {"analyze_wildcard": True, "query": "name:bytes_written"}}},
"label": "Bytes written",
},
]
},
},
{
"id": "3",
"enabled": True,
"type": "terms",
"schema": "split",
"params": {"field": "distribution-version", "size": 10, "order": "asc", "orderBy": "_term", "row": False},
},
{
"id": "4",
"enabled": True,
"type": "terms",
"schema": "group",
"params": {"field": "user-tags.setup", "size": 5, "order": "desc", "orderBy": "_term"},
},
],
"listeners": {},
}
search_source = {
"index": "rally-results-*",
"query": {"query_string": {"query": BarCharts.filter_string(environment, race_config), "analyze_wildcard": True}},
"filter": [],
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": BarCharts.UI_STATE_JSON,
"description": "io",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": json.dumps(search_source)},
},
}
@staticmethod
def ml_processing_time(title, environment, race_config):
return None
@staticmethod
def merge_count(title, environment, race_config):
return None
@staticmethod
def merge_time(title, environment, race_config):
return None
@staticmethod
def query(environment, race_config, q, iterations):
metric = "service_time"
if iterations < 100:
prefix = "p90"
field = "value.90_0"
else:
prefix = "p99"
field = "value.99_0"
title = BarCharts.format_title(environment, race_config.track, suffix=f"{race_config.label}-{q}-{prefix}-{metric}")
label = "Query Service Time [ms]"
vis_state = {
"title": title,
"type": "histogram",
"params": {
"addLegend": True,
"addTimeMarker": False,
"addTooltip": True,
"categoryAxes": [
{
"id": "CategoryAxis-1",
"labels": {"show": True, "truncate": 100},
"position": "bottom",
"scale": {"type": "linear"},
"show": True,
"style": {},
"title": {"text": "distribution-version: Ascending"},
"type": "category",
}
],
"defaultYExtents": False,
"drawLinesBetweenPoints": True,
"grid": {"categoryLines": False, "style": {"color": "#eee"}},
"interpolate": "linear",
"legendPosition": "right",
"radiusRatio": 9,
"scale": "linear",
"seriesParams": [
{
"data": {"id": "1", "label": label},
"drawLinesBetweenPoints": True,
"mode": "normal",
"show": "True",
"showCircles": True,
"type": "histogram",
"valueAxis": "ValueAxis-1",
}
],
"setYExtents": False,
"showCircles": True,
"times": [],
"valueAxes": [
{
"id": "ValueAxis-1",
"labels": {"filter": False, "rotate": 0, "show": True, "truncate": 100},
"name": "LeftAxis-1",
"position": "left",
"scale": {"mode": "normal", "type": "linear"},
"show": True,
"style": {},
"title": {"text": label},
"type": "value",
}
],
},
"aggs": [
{
"id": "1",
"enabled": True,
"type": "median",
"schema": "metric",
"params": {"field": field, "percents": [50], "customLabel": label},
},
{
"id": "2",
"enabled": True,
"type": "terms",
"schema": "segment",
"params": {"field": "distribution-version", "size": 10, "order": "asc", "orderBy": "_term"},
},
{
"id": "3",
"enabled": True,
"type": "terms",
"schema": "group",
"params": {"field": "user-tags.setup", "size": 10, "order": "desc", "orderBy": "_term"},
},
],
"listeners": {},
}
search_source = {
"index": "rally-results-*",
"query": {
"query_string": {
"query": 'name:"%s" AND task:"%s" AND %s' % (metric, q, BarCharts.filter_string(environment, race_config)),
"analyze_wildcard": True,
}
},
"filter": [],
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": BarCharts.UI_STATE_JSON,
"description": "query",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": json.dumps(search_source)},
},
}
@staticmethod
def index(environment, race_configs, title):
filters = []
for race_config in race_configs:
label = index_label(race_config)
# the assumption is that we only have one bulk task
for bulk_task in race_config.bulk_tasks:
filters.append(
{
"input": {
"query": {
"query_string": {
"analyze_wildcard": True,
"query": 'task:"%s" AND %s' % (bulk_task, BarCharts.filter_string(environment, race_config)),
}
}
},
"label": label,
}
)
vis_state = {
"aggs": [
{
"enabled": True,
"id": "1",
"params": {"customLabel": "Median Indexing Throughput [docs/s]", "field": "value.median", "percents": [50]},
"schema": "metric",
"type": "median",
},
{
"enabled": True,
"id": "2",
"params": {"field": "distribution-version", "order": "asc", "orderBy": "_term", "size": 10},
"schema": "segment",
"type": "terms",
},
{
"enabled": True,
"id": "3",
"params": {"field": "user-tags.setup", "order": "desc", "orderBy": "_term", "size": 10},
"schema": "group",
"type": "terms",
},
{"enabled": True, "id": "4", "params": {"filters": filters}, "schema": "split", "type": "filters"},
],
"listeners": {},
"params": {
"addLegend": True,
"addTimeMarker": False,
"addTooltip": True,
"categoryAxes": [
{
"id": "CategoryAxis-1",
"labels": {"show": True, "truncate": 100},
"position": "bottom",
"scale": {"type": "linear"},
"show": True,
"style": {},
"title": {"text": "distribution-version: Ascending"},
"type": "category",
}
],
"defaultYExtents": False,
"drawLinesBetweenPoints": True,
"grid": {"categoryLines": False, "style": {"color": "#eee"}},
"interpolate": "linear",
"legendPosition": "right",
"radiusRatio": 9,
"scale": "linear",
"seriesParams": [
{
"data": {"id": "1", "label": "Median Indexing Throughput [docs/s]"},
"drawLinesBetweenPoints": True,
"mode": "normal",
"show": "True",
"showCircles": True,
"type": "histogram",
"valueAxis": "ValueAxis-1",
}
],
"setYExtents": False,
"showCircles": True,
"times": [],
"valueAxes": [
{
"id": "ValueAxis-1",
"labels": {"filter": False, "rotate": 0, "show": True, "truncate": 100},
"name": "LeftAxis-1",
"position": "left",
"scale": {"mode": "normal", "type": "linear"},
"show": True,
"style": {},
"title": {"text": "Median Indexing Throughput [docs/s]"},
"type": "value",
}
],
"row": True,
},
"title": title,
"type": "histogram",
}
search_source = {
"index": "rally-results-*",
"query": {
"query_string": {"analyze_wildcard": True, "query": 'environment:"%s" AND active:true AND name:"throughput"' % environment}
},
"filter": [],
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": BarCharts.UI_STATE_JSON,
"description": "index",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": json.dumps(search_source)},
},
}
class TimeSeriesCharts:
@staticmethod
def format_title(environment, track_name, flavor=None, es_license=None, suffix=None):
title = [environment, str(track_name)]
if suffix:
title.append(suffix)
return "-".join(title)
@staticmethod
def filter_string(environment, race_config):
if race_config.name:
return f'environment:"{environment}" AND active:true AND user-tags.name:"{race_config.name}"'
else:
return (
f'environment:"{environment}" AND active:true AND track:"{race_config.track}"'
f' AND challenge:"{race_config.challenge}" AND car:"{race_config.car}" AND node-count:{race_config.node_count}'
)
@staticmethod
def gc(title, environment, race_config):
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "number",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.single"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": "",
"split_filters": [
{
"filter": "young_gc_time",
"label": "Young Gen GC time",
"color": "rgba(0,191,179,1)",
"id": str(uuid.uuid4()),
},
{
"filter": "old_gc_time",
"label": "Old Gen GC time",
"color": "rgba(254,209,10,1)",
"id": str(uuid.uuid4()),
},
],
"label": "GC Times",
"value_template": "{{value}} ms",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": TimeSeriesCharts.filter_string(environment, race_config),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") AND ((NOT _exists_:chart) OR chart:gc) '
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "gc",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def merge_time(title, environment, race_config):
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "number",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.single"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": "",
"split_filters": [
{
"filter": "merge_time",
"label": "Cumulative merge time",
"color": "rgba(0,191,179,1)",
"id": str(uuid.uuid4()),
},
{
"filter": "merge_throttle_time",
"label": "Cumulative merge throttle time",
"color": "rgba(254,209,10,1)",
"id": str(uuid.uuid4()),
},
],
"label": "Merge Times",
"value_template": "{{value}} ms",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": TimeSeriesCharts.filter_string(environment, race_config),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") '
f"AND ((NOT _exists_:chart) OR chart:merge_times) "
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "merge_times",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def merge_count(title, environment, race_config):
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "number",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.single"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": "",
"split_filters": [
{
"filter": "merge_count",
"label": "Cumulative merge count",
"color": "rgba(0,191,179,1)",
"id": str(uuid.uuid4()),
}
],
"label": "Merge Count",
"value_template": "{{value}}",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": TimeSeriesCharts.filter_string(environment, race_config),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") '
f"AND ((NOT _exists_:chart) OR chart:merge_count) "
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "merge_count",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def ml_processing_time(title, environment, race_config):
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "number",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.max"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": "",
"split_filters": [
{
"filter": "ml_processing_time",
"label": "Maximum ML processing time",
"color": "rgba(0,191,179,1)",
"id": str(uuid.uuid4()),
}
],
"label": "ML Time",
"value_template": "{{value}}",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": TimeSeriesCharts.filter_string(environment, race_config),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") '
f"AND ((NOT _exists_:chart) OR chart:ml_processing_time) "
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "ml_processing_time",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def io(title, environment, race_config):
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "bytes",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "sum", "field": "value.single"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": "",
"split_filters": [
{
"filter": "name:index_size",
"label": "Index Size",
"color": "rgba(0,191,179,1)",
"id": str(uuid.uuid4()),
},
{
"filter": "name:bytes_written",
"label": "Written",
"color": "rgba(254,209,10,1)",
"id": str(uuid.uuid4()),
},
],
"label": "Disk IO",
"value_template": "{{value}}",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": TimeSeriesCharts.filter_string(environment, race_config),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") AND ((NOT _exists_:chart) OR chart:io) '
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "io",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def query(environment, race_config, q, iterations):
metric = "latency"
title = TimeSeriesCharts.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix="%s-%s-%s" % (race_config.label, q, metric)
)
vis_state = {
"title": title,
"type": "metrics",
"params": {
"id": str(uuid.uuid4()),
"type": "timeseries",
"series": [
{
"id": str(uuid.uuid4()),
"color": color_scheme_rgba[0],
"split_mode": "everything",
"label": "50th percentile",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.50_0"}],
"seperate_axis": 0,
"axis_position": "right",
"formatter": "number",
"chart_type": "line",
"line_width": 1,
"point_size": 1,
"fill": "0.6",
"stacked": "none",
"split_color_mode": "gradient",
"series_drop_last_bucket": 0,
"value_template": "{{value}} ms",
},
{
"id": str(uuid.uuid4()),
"color": color_scheme_rgba[1],
"split_mode": "everything",
"label": "90th percentile",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.90_0"}],
"seperate_axis": 0,
"axis_position": "right",
"formatter": "number",
"chart_type": "line",
"line_width": 1,
"point_size": 1,
"fill": "0.4",
"stacked": "none",
"split_color_mode": "gradient",
"series_drop_last_bucket": 0,
"value_template": "{{value}} ms",
},
{
"id": str(uuid.uuid4()),
"color": color_scheme_rgba[2],
"split_mode": "everything",
"label": "99th percentile",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.99_0"}],
"seperate_axis": 0,
"axis_position": "right",
"formatter": "number",
"chart_type": "line",
"line_width": 1,
"point_size": 1,
"fill": "0.2",
"stacked": "none",
"split_color_mode": "gradient",
"series_drop_last_bucket": 0,
"value_template": "{{value}} ms",
},
{
"id": str(uuid.uuid4()),
"color": color_scheme_rgba[3],
"split_mode": "everything",
"label": "100th percentile",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.100_0"}],
"seperate_axis": 0,
"axis_position": "right",
"formatter": "number",
"chart_type": "line",
"line_width": 1,
"point_size": 1,
"fill": "0.1",
"stacked": "none",
"split_color_mode": "gradient",
"series_drop_last_bucket": 0,
"value_template": "{{value}} ms",
},
],
"time_field": "race-timestamp",
"index_pattern": "rally-results-*",
"interval": "1d",
"axis_position": "left",
"axis_formatter": "number",
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"background_color_rules": [{"id": str(uuid.uuid4())}],
"filter": 'task:"%s" AND name:"%s" AND %s' % (q, metric, TimeSeriesCharts.filter_string(environment, race_config)),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{race_config.track}") '
f"AND ((NOT _exists_:chart) OR chart:query) "
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "query",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
@staticmethod
def index(environment, race_configs, title):
filters = []
# any race_config will do - they all belong to the same track
t = race_configs[0].track
for idx, race_config in enumerate(race_configs):
label = index_label(race_config)
for bulk_task in race_config.bulk_tasks:
filters.append(
{
"filter": 'task:"%s" AND %s' % (bulk_task, TimeSeriesCharts.filter_string(environment, race_config)),
"label": label,
"color": color_scheme_rgba[idx % len(color_scheme_rgba)],
"id": str(uuid.uuid4()),
}
)
vis_state = {
"title": title,
"type": "metrics",
"params": {
"axis_formatter": "number",
"axis_position": "left",
"id": str(uuid.uuid4()),
"index_pattern": "rally-results-*",
"interval": "1d",
"series": [
{
"axis_position": "left",
"chart_type": "line",
"color": "#68BC00",
"fill": "0",
"formatter": "number",
"id": str(uuid.uuid4()),
"line_width": "1",
"metrics": [{"id": str(uuid.uuid4()), "type": "avg", "field": "value.median"}],
"point_size": "3",
"seperate_axis": 1,
"split_mode": "filters",
"stacked": "none",
"filter": 'environment:"%s" AND track:"%s"' % (environment, t),
"split_filters": filters,
"label": "Indexing Throughput",
"value_template": "{{value}} docs/s",
"steps": 0,
}
],
"show_legend": 1,
"show_grid": 1,
"drop_last_bucket": 0,
"time_field": "race-timestamp",
"type": "timeseries",
"filter": 'environment:"%s" AND track:"%s" AND name:"throughput" AND active:true' % (environment, t),
"annotations": [
{
"fields": "message",
"template": "{{message}}",
"index_pattern": "rally-annotations",
"query_string": f'((NOT _exists_:track) OR track:"{t}") '
f"AND ((NOT _exists_:chart) OR chart:indexing) "
f'AND ((NOT _exists_:chart-name) OR chart-name:"{title}") AND environment:"{environment}"',
"id": str(uuid.uuid4()),
"color": "rgba(102,102,102,1)",
"time_field": "race-timestamp",
"icon": "fa-tag",
"ignore_panel_filters": 1,
}
],
"axis_min": "0",
},
"aggs": [],
"listeners": {},
}
return {
"id": str(uuid.uuid4()),
"type": "visualization",
"attributes": {
"title": title,
"visState": json.dumps(vis_state),
"uiStateJSON": "{}",
"description": "index",
"version": 1,
"kibanaSavedObjectMeta": {"searchSourceJSON": '{"query":"*","filter":[]}'},
},
}
class RaceConfigTrack:
def __init__(self, cfg, repository, name=None):
self.repository = repository
self.cached_track = self.load_track(cfg, name=name)
def load_track(self, cfg, name=None, params=None, excluded_tasks=None):
if not params:
params = {}
# required in case a previous track using a different repository has specified the revision
if cfg.opts("track", "repository.name", mandatory=False) != self.repository:
cfg.add(config.Scope.applicationOverride, "track", "repository.revision", None)
# hack to make this work with multiple tracks (Rally core is usually not meant to be used this way)
if name:
cfg.add(config.Scope.applicationOverride, "track", "repository.name", self.repository)
cfg.add(config.Scope.applicationOverride, "track", "track.name", name)
# another hack to ensure any track-params in the race config are used by Rally's track loader
cfg.add(config.Scope.applicationOverride, "track", "params", params)
if excluded_tasks:
cfg.add(config.Scope.application, "track", "exclude.tasks", excluded_tasks)
return track.load_track(cfg)
def get_track(self, cfg, name=None, params=None, excluded_tasks=None):
if params or excluded_tasks:
return self.load_track(cfg, name, params, excluded_tasks)
        # if no params are specified, return the initially cached (non-parametrized) track
return self.cached_track
def generate_index_ops(chart_type, race_configs, environment, logger):
idx_race_configs = list(filter(lambda c: "indexing" in c.charts, race_configs))
for race_conf in idx_race_configs:
logger.debug(
"Gen index visualization for race config with name:[%s] / label:[%s] / flavor: [%s] / license: [%s]",
race_conf.name,
race_conf.label,
race_conf.flavor,
race_conf.es_license,
)
charts = []
if idx_race_configs:
title = chart_type.format_title(environment, race_configs[0].track, flavor=race_configs[0].flavor, suffix="indexing-throughput")
charts = [chart_type.index(environment, idx_race_configs, title)]
return charts
def generate_queries(chart_type, race_configs, environment):
# output JSON structures
structures = []
for race_config in race_configs:
if "query" in race_config.charts:
for q in race_config.throttled_tasks:
structures.append(chart_type.query(environment, race_config, q.name, q.params.get("iterations", 100)))
return structures
def generate_io(chart_type, race_configs, environment):
# output JSON structures
structures = []
for race_config in race_configs:
if "io" in race_config.charts:
title = chart_type.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix="%s-io" % race_config.label
)
structures.append(chart_type.io(title, environment, race_config))
return structures
def generate_gc(chart_type, race_configs, environment):
structures = []
for race_config in race_configs:
if "gc" in race_config.charts:
title = chart_type.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix="%s-gc" % race_config.label
)
structures.append(chart_type.gc(title, environment, race_config))
return structures
def generate_merge_time(chart_type, race_configs, environment):
structures = []
if chart_type == BarCharts:
return structures
for race_config in race_configs:
if "merge_times" in race_config.charts:
title = chart_type.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix=f"{race_config.label}-merge-times"
)
chart = chart_type.merge_time(title, environment, race_config)
if chart is not None:
structures.append(chart)
return structures
def generate_ml_processing_time(chart_type, race_configs, environment):
structures = []
for race_config in race_configs:
if "ml_processing_time" in race_config.charts:
title = chart_type.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix=f"{race_config.label}-ml-processing-time"
)
chart = chart_type.ml_processing_time(title, environment, race_config)
if chart is not None:
structures.append(chart)
return structures
def generate_merge_count(chart_type, race_configs, environment):
structures = []
for race_config in race_configs:
if "merge_count" in race_config.charts:
title = chart_type.format_title(
environment, race_config.track, es_license=race_config.es_license, suffix=f"{race_config.label}-merge-count"
)
chart = chart_type.merge_count(title, environment, race_config)
if chart is not None:
structures.append(chart)
return structures
def generate_dashboard(chart_type, environment, track, charts, flavor=None):
panels = []
width = 24
height = 32
row = 0
col = 0
for idx, chart in enumerate(charts):
panelIndex = idx + 1
# make index charts wider
if chart["attributes"]["description"] == "index":
chart_width = 2 * width
# force one panel per row
next_col = 0
else:
chart_width = width
            # two panels per row
next_col = (col + 1) % 2
panel = {
"id": chart["id"],
"panelIndex": panelIndex,
"gridData": {"x": (col * chart_width), "y": (row * height), "w": chart_width, "h": height, "i": str(panelIndex)},
"type": "visualization",
"version": "7.10.2",
}
panels.append(panel)
col = next_col
if col == 0:
row += 1
return {
"id": str(uuid.uuid4()),
"type": "dashboard",
"attributes": {
"title": chart_type.format_title(environment, track.name, flavor=flavor),
"hits": 0,
"description": "",
"panelsJSON": json.dumps(panels),
"optionsJSON": '{"darkTheme":false}',
"uiStateJSON": "{}",
"version": 1,
"timeRestore": False,
"kibanaSavedObjectMeta": {
"searchSourceJSON": json.dumps(
{
"filter": [{"query": {"query_string": {"analyze_wildcard": True, "query": "*"}}}],
"highlightAll": True,
"version": True,
}
)
},
},
}
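# Worked layout example (descriptive only): with width=24 and height=32 as set
# above, three regular charts land two per row at gridData (x, y) positions
# (0, 0), (24, 0) and (0, 32); an "index" chart instead spans 2 * 24 = 48
# units and forces the following chart onto a new row.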
class RaceConfig:
def __init__(self, track, cfg=None, flavor=None, es_license=None, challenge=None, car=None, node_count=None, charts=None):
self.track = track
if cfg:
self.configuration = cfg
self.configuration["flavor"] = flavor
self.configuration["es_license"] = es_license
else:
self.configuration = {"charts": charts, "challenge": challenge, "car": car, "node-count": node_count}
@property
def name(self):
return self.configuration.get("name")
@property
def flavor(self):
return self.configuration.get("flavor")
@property
def es_license(self):
return self.configuration.get("es_license")
@property
def label(self):
return self.configuration.get("label")
@property
def charts(self):
return self.configuration["charts"]
@property
def node_count(self):
return self.configuration.get("node-count", 1)
@property
def challenge(self):
return self.configuration["challenge"]
@property
def car(self):
return self.configuration["car"]
@property
def plugins(self):
return self.configuration.get("plugins", "")
@property
def bulk_tasks(self):
task_names = []
for task in self.track.find_challenge_or_default(self.challenge).schedule:
for sub_task in task:
                # We are looking for bulk-type operations to add to the indexing throughput chart.
                # For the observability track, the index operation is of type raw-bulk instead of
                # type bulk, so we do a lenient (substring) match to allow for that.
if track.OperationType.Bulk.to_hyphenated_string() in sub_task.operation.type:
if track.OperationType.Bulk.to_hyphenated_string() != sub_task.operation.type:
console.info(
f"Found [{sub_task.name}] of type [{sub_task.operation.type}] in "
f"[{self.challenge}], adding it to indexing dashboard.\n",
flush=True,
)
task_names.append(sub_task.name)
return task_names
@property
def throttled_tasks(self):
task_names = []
for task in self.track.find_challenge_or_default(self.challenge).schedule:
for sub_task in task:
# We are assuming here that each task with a target throughput or target interval is interesting for latency charts.
#
# As a temporary workaround we're also treating operations of type "eql" as throttled tasks (requiring a latency
# or service time chart) although they are (at the moment) not throttled. These tasks originate from the EQL track
# available at https://github.com/elastic/rally-tracks/tree/master/eql.
#
# We should refactor the chart generator to make this classification logic more flexible so the user can specify
# which tasks / or types of operations should be used for which chart types.
if (
sub_task.operation.type in ["search", "composite", "eql", "paginated-search", "scroll-search"]
or "target-throughput" in sub_task.params
or "target-interval" in sub_task.params
):
task_names.append(sub_task)
return task_names
def load_race_configs(cfg, chart_type, chart_spec_path=None):
def add_configs(race_configs_per_lic, flavor_name="oss", lic="oss", track_name=None):
configs_per_lic = []
for race_config in race_configs_per_lic:
excluded_tasks = None
if "exclude-tasks" in race_config:
excluded_tasks = race_config.get("exclude-tasks").split(",")
configs_per_lic.append(
RaceConfig(
track=race_config_track.get_track(
cfg, name=track_name, params=race_config.get("track-params", {}), excluded_tasks=excluded_tasks
),
cfg=race_config,
flavor=flavor_name,
es_license=lic,
)
)
return configs_per_lic
def add_race_configs(license_configs, flavor_name, track_name):
if chart_type == BarCharts:
# Only one license config, "trial", is present in bar charts
_lic_conf = [license_config["configurations"] for license_config in license_configs if license_config["name"] == "trial"]
if _lic_conf:
race_configs_per_track.extend(add_configs(_lic_conf[0], track_name=track_name))
else:
for lic_config in license_configs:
race_configs_per_track.extend(add_configs(lic_config["configurations"], flavor_name, lic_config["name"], track_name))
race_configs = {"oss": [], "default": []}
if chart_type == BarCharts:
race_configs = []
chart_specs = glob.glob(io.normalize_path(chart_spec_path))
if not chart_specs:
raise exceptions.NotFound(f"Chart spec path [{chart_spec_path}] not found.")
for _track_file in chart_specs:
with open(_track_file, mode="rt", encoding="utf-8") as f:
for item in json.load(f):
_track_repository = item.get("track-repository", "default")
race_config_track = RaceConfigTrack(cfg, _track_repository, name=item["track"])
for flavor in item["flavors"]:
race_configs_per_track = []
_flavor_name = flavor["name"]
_track_name = item["track"]
add_race_configs(flavor["licenses"], _flavor_name, _track_name)
if race_configs_per_track:
if chart_type == BarCharts:
race_configs.append(race_configs_per_track)
else:
race_configs[_flavor_name].append(race_configs_per_track)
return race_configs
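# Sketch of the chart-spec JSON consumed above (field names are taken from the
# parsing logic; the concrete values are illustrative assumptions only):
# [{"track": "geonames",
#   "track-repository": "default",
#   "flavors": [{"name": "oss",
#                "licenses": [{"name": "oss",
#                              "configurations": [{"name": "geonames-append",
#                                                  "label": "add-defaults",
#                                                  "charts": ["indexing", "query"],
#                                                  "challenge": "append-no-conflicts",
#                                                  "car": "defaults",
#                                                  "node-count": 1,
#                                                  "track-params": {},
#                                                  "exclude-tasks": "task-a,task-b"}]}]}]}]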
def gen_charts_per_track_configs(race_configs, chart_type, env, flavor=None, logger=None):
charts = (
generate_index_ops(chart_type, race_configs, env, logger)
+ generate_io(chart_type, race_configs, env)
+ generate_gc(chart_type, race_configs, env)
+ generate_merge_time(chart_type, race_configs, env)
+ generate_merge_count(chart_type, race_configs, env)
+ generate_ml_processing_time(chart_type, race_configs, env)
+ generate_queries(chart_type, race_configs, env)
)
dashboard = generate_dashboard(chart_type, env, race_configs[0].track, charts, flavor)
return charts, dashboard
def gen_charts_per_track(race_configs, chart_type, env, flavor=None, logger=None):
structures = []
for race_configs_per_track in race_configs:
charts, dashboard = gen_charts_per_track_configs(race_configs_per_track, chart_type, env, flavor, logger)
structures.extend(charts)
structures.append(dashboard)
return structures
def gen_charts_from_track_combinations(race_configs, chart_type, env, logger):
structures = []
for flavor, race_configs_per_flavor in race_configs.items():
for race_configs_per_track in race_configs_per_flavor:
logger.debug("Generating charts for race_configs with name:[%s]/flavor:[%s]", race_configs_per_track[0].name, flavor)
charts, dashboard = gen_charts_per_track_configs(race_configs_per_track, chart_type, env, flavor, logger)
structures.extend(charts)
structures.append(dashboard)
return structures
def generate(cfg):
logger = logging.getLogger(__name__)
chart_spec_path = cfg.opts("generator", "chart.spec.path")
if cfg.opts("generator", "chart.type") == "time-series":
chart_type = TimeSeriesCharts
else:
chart_type = BarCharts
console.info("Loading track data...", flush=True)
race_configs = load_race_configs(cfg, chart_type, chart_spec_path)
env = cfg.opts("system", "env.name")
structures = []
console.info("Generating charts...", flush=True)
if chart_type == BarCharts:
# bar charts are flavor agnostic and split results based on a separate `user.setup` field
structures = gen_charts_per_track(race_configs, chart_type, env, logger=logger)
elif chart_type == TimeSeriesCharts:
structures = gen_charts_from_track_combinations(race_configs, chart_type, env, logger)
output_path = cfg.opts("generator", "output.path")
if output_path:
with open(io.normalize_path(output_path), mode="wt", encoding="utf-8") as f:
for record in structures:
print(json.dumps(record), file=f)
else:
for record in structures:
print(json.dumps(record))
| 40.048417 | 139 | 0.433166 |
970d5b912cf01797894f63a6209f4e7a2557c70a | 1,865 | py | Python | code/count_word_coll.py | reginaldcobb/findbook | 95fd179426a1ee69802b4c541c0d22b8037e59cc | [
"MIT"
] | null | null | null | code/count_word_coll.py | reginaldcobb/findbook | 95fd179426a1ee69802b4c541c0d22b8037e59cc | [
"MIT"
] | null | null | null | code/count_word_coll.py | reginaldcobb/findbook | 95fd179426a1ee69802b4c541c0d22b8037e59cc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import collections
import os
def remove_newlines(fname):
    """Return the file's lines with trailing newlines stripped."""
    with open(fname) as f:
        return [s.rstrip('\n') for s in f]
# For every .txt file in the current directory, append one record to the
# output file: a line of the form "<filename>;WORD;WORD;...;" in which each
# word is uppercased and repeated once per occurrence in that file.
OUTPUT_PATH = "/home/rcobb/Downloads/text_filename.txt"
with open(OUTPUT_PATH, "a") as out:
    with os.scandir('./') as entries:
        for entry in entries:
            if not entry.name.endswith(".txt"):
                continue
            wordcount = collections.Counter()
            with open(entry.name) as f:
                for line in f:
                    wordcount.update(line.split())
            print(entry.name)
            out.write("\n")
            out.write(entry.name)
            out.write(";")
            for word, count in wordcount.items():
                out.write((word.upper() + ";") * count)
| 32.155172 | 77 | 0.542091 |
e2b294afe12c7b4ebb561b044a434dd0d0fff855 | 1,617 | py | Python | tests/test_services/test_parse_bp/actions.py | Jumpscale/ays_jumpscale8 | 4ff4a2fb3b95de6f46ea494bd5b5a2a0fb9ecdb1 | [
"Apache-2.0"
] | 4 | 2017-06-07T08:10:06.000Z | 2017-11-10T02:20:38.000Z | tests/test_services/test_parse_bp/actions.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | [
"Apache-2.0"
] | 242 | 2017-05-18T10:51:48.000Z | 2019-09-18T15:09:47.000Z | tests/test_services/test_parse_bp/actions.py | Jumpscale/ays_jumpscale8 | 4ff4a2fb3b95de6f46ea494bd5b5a2a0fb9ecdb1 | [
"Apache-2.0"
] | 5 | 2017-06-16T15:43:25.000Z | 2017-09-29T12:48:06.000Z | def init_actions_(service, args):
"""
    This needs to return a mapping of actions representing the dependencies between actions.
    Look at ACTION_DEPS in this module for an example of what is expected.
"""
# some default logic for simple actions
return {
'test': ['install']
}
def test(job):
"""
    Tests parsing of a blueprint with/without default values.
"""
import sys
RESULT_OK = 'OK : %s'
RESULT_FAILED = 'FAILED : %s'
RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
model = job.service.model
model.data.result = RESULT_OK % job.service.name
try:
        # Tests that values set in the blueprint override default values
if job.service.name == 'without_defaultvalue':
if model.data.description != 'another description':
model.data.result = RESULT_FAILED % ('Values in blueprint do not override default values')
# Tests that parsing args with default values works
elif job.service.name == 'with_defaultvalue':
if model.data.description != 'description':
model.data.result = RESULT_FAILED % ("Parsing blueprint with default values failed")
# Tests parsing blueprint that has special characters
elif job.service.name == 'with_special_characters':
if model.data.description != 'Können Sie mir behilflich sein?':
model.data.result = RESULT_FAILED % ("Failed to parse blueprint with special characters")
    except Exception:  # avoid a bare except that would also swallow KeyboardInterrupt/SystemExit
model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
job.service.save()
| 36.75 | 106 | 0.648114 |
b7cea46d819c6d903b3e620239377655890645d6 | 7,733 | py | Python | lib/spack/spack/cmd/mirror.py | rtohid/spack | 6df57bb2d0619a22b0bb0a5028b7caef7f31e722 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | lib/spack/spack/cmd/mirror.py | rtohid/spack | 6df57bb2d0619a22b0bb0a5028b7caef7f31e722 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2018-09-20T18:32:50.000Z | 2019-12-04T16:58:12.000Z | lib/spack/spack/cmd/mirror.py | rtohid/spack | 6df57bb2d0619a22b0bb0a5028b7caef7f31e722 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import os
from datetime import datetime
import argparse
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
import spack.cmd
import spack.concretize
import spack.config
import spack.mirror
import spack.repo
import spack.cmd.common.arguments as arguments
import spack.environment as ev
from spack.spec import Spec
from spack.error import SpackError
from spack.util.spack_yaml import syaml_dict
description = "manage mirrors (source and binary)"
section = "config"
level = "long"
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['no_checksum'])
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='mirror_command')
# Create
create_parser = sp.add_parser('create', help=mirror_create.__doc__)
create_parser.add_argument('-d', '--directory', default=None,
help="directory in which to create mirror")
create_parser.add_argument(
'specs', nargs=argparse.REMAINDER,
help="specs of packages to put in mirror")
create_parser.add_argument(
'-f', '--file', help="file with specs of packages to put in mirror")
create_parser.add_argument(
'-D', '--dependencies', action='store_true',
help="also fetch all dependencies")
create_parser.add_argument(
'-n', '--versions-per-spec', type=int,
default=1,
help="the number of versions to fetch for each spec")
# used to construct scope arguments below
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
# Add
add_parser = sp.add_parser('add', help=mirror_add.__doc__)
add_parser.add_argument('name', help="mnemonic name for mirror")
add_parser.add_argument(
'url', help="url of mirror directory from 'spack mirror create'")
add_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
# Remove
remove_parser = sp.add_parser('remove', aliases=['rm'],
help=mirror_remove.__doc__)
remove_parser.add_argument('name')
remove_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
# List
list_parser = sp.add_parser('list', help=mirror_list.__doc__)
list_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_list_scope(),
help="configuration scope to read from")
def mirror_add(args):
"""Add a mirror to Spack."""
url = args.url
if url.startswith('/'):
url = 'file://' + url
mirrors = spack.config.get('mirrors', scope=args.scope)
if not mirrors:
mirrors = syaml_dict()
for name, u in mirrors.items():
if name == args.name:
tty.die("Mirror with name %s already exists." % name)
if u == url:
tty.die("Mirror with url %s already exists." % url)
# should only be one item per mirror dict.
items = [(n, u) for n, u in mirrors.items()]
items.insert(0, (args.name, url))
mirrors = syaml_dict(items)
spack.config.set('mirrors', mirrors, scope=args.scope)
def mirror_remove(args):
"""Remove a mirror by name."""
name = args.name
mirrors = spack.config.get('mirrors', scope=args.scope)
if not mirrors:
mirrors = syaml_dict()
if name not in mirrors:
tty.die("No mirror with name %s" % name)
old_value = mirrors.pop(name)
spack.config.set('mirrors', mirrors, scope=args.scope)
tty.msg("Removed mirror %s with url %s" % (name, old_value))
def mirror_list(args):
"""Print out available mirrors to the console."""
mirrors = spack.config.get('mirrors', scope=args.scope)
if not mirrors:
tty.msg("No mirrors configured.")
return
max_len = max(len(n) for n in mirrors.keys())
fmt = "%%-%ds%%s" % (max_len + 4)
for name in mirrors:
print(fmt % (name, mirrors[name]))
def _read_specs_from_file(filename):
specs = []
with open(filename, "r") as stream:
for i, string in enumerate(stream):
try:
s = Spec(string)
s.package
specs.append(s)
except SpackError as e:
tty.debug(e)
tty.die("Parse error in %s, line %d:" % (filename, i + 1),
">>> " + string, str(e))
return specs
def mirror_create(args):
"""Create a directory to be used as a spack mirror, and fill it with
package archives."""
# try to parse specs from the command line first.
with spack.concretize.concretizer.disable_compiler_existence_check():
specs = spack.cmd.parse_specs(args.specs, concretize=True)
# If there is a file, parse each line as a spec and add it to the list.
if args.file:
if specs:
tty.die("Cannot pass specs on the command line with --file.")
specs = _read_specs_from_file(args.file)
# If nothing is passed, use environment or all if no active env
if not specs:
env = ev.get_env(args, 'mirror')
if env:
specs = env.specs_by_hash.values()
else:
specs = [Spec(n) for n in spack.repo.all_package_names()]
specs.sort(key=lambda s: s.format("{name}{@version}").lower())
# If the user asked for dependencies, traverse spec DAG get them.
if args.dependencies:
new_specs = set()
for spec in specs:
spec.concretize()
for s in spec.traverse():
new_specs.add(s)
specs = list(new_specs)
# Skip external specs, as they are already installed
external_specs = [s for s in specs if s.external]
specs = [s for s in specs if not s.external]
for spec in external_specs:
msg = 'Skipping {0} as it is an external spec.'
tty.msg(msg.format(spec.cshort_spec))
# Default name for directory is spack-mirror-<DATESTAMP>
directory = args.directory
if not directory:
timestamp = datetime.now().strftime("%Y-%m-%d")
directory = 'spack-mirror-' + timestamp
# Make sure nothing is in the way.
existed = os.path.isdir(directory)
# Actually do the work to create the mirror
present, mirrored, error = spack.mirror.create(
directory, specs, num_versions=args.versions_per_spec)
p, m, e = len(present), len(mirrored), len(error)
verb = "updated" if existed else "created"
tty.msg(
"Successfully %s mirror in %s" % (verb, directory),
"Archive stats:",
" %-4d already present" % p,
" %-4d added" % m,
" %-4d failed to fetch." % e)
if error:
tty.error("Failed downloads:")
colify(s.cformat("{name}{@version}") for s in error)
sys.exit(1)
def mirror(parser, args):
action = {'create': mirror_create,
'add': mirror_add,
'remove': mirror_remove,
'rm': mirror_remove,
'list': mirror_list}
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
action[args.mirror_command](args)
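# Typical invocations, mirroring the argparse wiring above (paths and spec
# names are illustrative):
#   spack mirror create -d ./my-mirror libelf
#   spack mirror add local file:///path/to/my-mirror
#   spack mirror list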
| 33.768559 | 79 | 0.61619 |
7287a6f65ee56ebf576d781a07d2da3b3d83951b | 10,702 | py | Python | BeautyRecommendation/Data/yolo_image.py | hundyoung/BeautyRecommendation | 96680b5b1d9eceb0022cf833bda1ae0c1d9c499f | [
"MIT"
] | 1 | 2020-09-02T15:38:06.000Z | 2020-09-02T15:38:06.000Z | BeautyRecommendation/Data/yolo_image.py | hundyoung/BeautyRecommendation | 96680b5b1d9eceb0022cf833bda1ae0c1d9c499f | [
"MIT"
] | null | null | null | BeautyRecommendation/Data/yolo_image.py | hundyoung/BeautyRecommendation | 96680b5b1d9eceb0022cf833bda1ae0c1d9c499f | [
"MIT"
] | null | null | null | import colorsys
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image
from Data.yolo3.model import yolo_body, tiny_yolo_body
from Data.yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score": 0.3,
"iou": 0.45,
"model_image_size": (416, 416),
"gpu_num": 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.generate()
# Initialize the parameters
self.confThreshold = 0.5 # Confidence threshold
self.nmsThreshold = 0.4 # Non-maximum suppression threshold
self.inpWidth = 416 # Width of network's input image
self.inpHeight = 416 # Height of network's input image
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors == 6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
if self.gpu_num >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
prediction = self.yolo_model.predict(image_data)
boxes = self.postProcess(prediction,self.anchors,len(self.class_names),(image.size[1],image.size[0]))
predicted_class_list=[]
for top,left,bottom,right,box_classes,box_class_scores in boxes:
box_classes=int(box_classes)
predicted_class = self.class_names[box_classes]
predicted_class_list.append(predicted_class)
return predicted_class_list
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
# Remove the bounding boxes with low confidence using non-maxima suppression
def postProcess(self,feats,anchors, num_classes, image_shape):
"""Convert final layer features to bounding box parameters."""
# Reshape to batch, height, width, num_anchors, box_params.
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
result = []
boxes_list = []
box_scores_list = []
for l in range(len(feats)):
anchor = anchors[anchor_mask[l]]
anchors_tensor = np.reshape(anchor, [1, 1, 1, len(anchor), 2])
feat = feats[l]
grid_shape = np.shape(feat)[1:3] # height, width
grid_y = np.tile(np.reshape(np.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = np.tile(np.reshape(np.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = np.concatenate((grid_x, grid_y),axis=-1)
feat = np.reshape(
                feat, [-1, grid_shape[0], grid_shape[1], len(anchor), num_classes + 5])
box_confidence = self.sigmoid(feat[..., 4:5])
box_class_probs = self.sigmoid(feat[..., 5:])
input_shape = np.array([416,416])
# Adjust preditions to each spatial grid point and anchor size.
box_xy = (self.sigmoid(feat[..., :2]) + grid) / grid_shape[::-1]
box_wh = np.exp(feat[..., 2:4]) * anchors_tensor / input_shape[::-1]
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
new_shape = np.round(np.multiply(image_shape,min(input_shape / image_shape)))
offset = (input_shape - new_shape) / 2. / input_shape
scale = input_shape / new_shape
box_yx = np.multiply((box_yx - offset), scale)
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = np.concatenate((
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
),axis=-1)
# Scale boxes back to original image shape.
boxes = np.multiply(boxes, np.concatenate((image_shape, image_shape)))
box_scores = np.multiply(box_confidence,box_class_probs)
#NMS
boxes = np.reshape(boxes, [-1, 4])
box_scores = np.reshape(box_scores, [-1, num_classes])
boxes_list.append(boxes)
box_scores_list.append(box_scores)
boxes_list = np.concatenate(boxes_list, axis=0)
box_scores_list = np.concatenate(box_scores_list, axis=0)
for c in range(num_classes):
scores = box_scores_list[:,c]
boxes_class = np.insert(boxes_list,4, c,1)
boxes_class = np.insert(boxes_class,5, scores,1)
mask = scores>self.confThreshold
boxes_class = boxes_class[mask]
if len(boxes_class)>0:
boxes_class=sorted(boxes_class,key=lambda x:x[-1],reverse=True)
iou_list = np.array(boxes_class)
while len(iou_list)>0:
candidate_box= iou_list[0]
result.append(candidate_box)
if len(iou_list)==1:
break
iou_list = iou_list[1:]
b1_mins = iou_list[:,:2]
b2_mins = candidate_box[:2]
b1_maxs = iou_list[:,2:4]
b2_maxs = candidate_box[2:4]
intersect_mins = np.maximum(b1_mins, b2_mins)
intersect_maxes = np.minimum(b1_maxs, b2_maxs)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = (b1_maxs[:,0] - b1_mins[:,0]) * (b1_maxs[:,1] - b1_mins[:,1])
b2_area = (b2_maxs[0] - b2_mins[0]) * (b2_maxs[1] - b2_mins[1])
iou = intersect_area / (b1_area + b2_area - intersect_area)
iou_mask = iou<=self.iou
# iou_mask =np.concatenate(iou_mask,axis=0)
iou_list = iou_list[iou_mask]
return result
if __name__ == '__main__':
    img = './TestData/soccer_foul.jpg'
    image = Image.open(img)
    yolo = YOLO()
    # detect_image returns a list of predicted class names, not an image.
    predicted_classes = yolo.detect_image(image)
    print(predicted_classes)
| 44.406639 | 110 | 0.572229 |
f19f1872afe1d5ca1900579736e015be63ca64a1 | 2,834 | py | Python | tests/test_tokenizer.py | domonless/cppjieba-py | 73479ab9c4edbdec8a58a9b021a091e0bb1bd113 | [
"MIT"
] | null | null | null | tests/test_tokenizer.py | domonless/cppjieba-py | 73479ab9c4edbdec8a58a9b021a091e0bb1bd113 | [
"MIT"
] | null | null | null | tests/test_tokenizer.py | domonless/cppjieba-py | 73479ab9c4edbdec8a58a9b021a091e0bb1bd113 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# pylint: disable=E1101
from spec import Spec
import sys
if sys.version_info[0] >= 3:
from pathlib import Path
else:
from pathlib2 import Path
DICT_DIR = Path("../cppjieba/dict")
DICT = str(DICT_DIR / "jieba.dict.utf8")
USER_DICT = str(DICT_DIR / "user.dict.utf8")
STOP_WORD = str(DICT_DIR / "stop_words.utf8")
from cppjieba_py import Tokenizer
class TokenizerTest(Spec):
@classmethod
def setUpClass(cls):
cls.dt = Tokenizer(DICT)
cls.dt.add_word("区块链", 10, "nz")
class init_0:
"__init__"
def takes_arg1_as_main_dict_path(self):
pass
def takes_arg2_as_user_dict_path(self):
Tokenizer(DICT, USER_DICT)
def takes_arg3_as_stopword_path(self):
Tokenizer(DICT, USER_DICT, STOP_WORD)
class cut:
def takes_arg1_as_sentence(self):
self.dt.cut("")
def takes_arg2_as_cut_all(self):
self.dt.cut("", True)
def takes_arg3_as_HMM(self):
self.dt.cut("", True, True)
def returns_iterator(self):
            try:
                from collections.abc import Iterable, Sequence
            except ImportError:  # Python 2 fallback; removed from collections in 3.10
                from collections import Iterable, Sequence
r = self.dt.cut("", True, True)
iterable = isinstance(r, Iterable)
sequence = isinstance(r, Sequence)
assert iterable and not sequence
class lcut:
def takes_arg1_as_sentence(self):
self.dt.cut("")
def takes_arg2_as_cut_all(self):
self.dt.cut("", True)
def takes_arg3_as_HMM(self):
self.dt.cut("", True, True)
def returns_list(self):
r = self.dt.lcut("", True, True)
assert isinstance(r, list)
class load_userdict:
def accept_string_as_arg(self):
self.dt.load_userdict("")
def accept_list_as_arg(self):
self.dt.load_userdict([])
def accept_set_as_arg(self):
self.dt.load_userdict(set([]))
class add_word:
def takes_arg1_as_word(self):
self.dt.add_word("区块链")
def takes_arg2_as_freq(self):
self.dt.add_word("区块链", 10)
def takes_arg3_as_tag(self):
pass
class find:
def takes_arg1_as_word(self):
self.dt.find("区块链")
def can_find_added_word(self):
r = self.dt.find("区块链")
assert r == True
class lookup_tag:
def takes_arg1_as_word(self):
self.dt.lookup_tag("区块链")
def can_find_added_word(self):
self.dt.add_word("区块链", 10, "nz") # because of random test order
            r = self.dt.lookup_tag("区块链")
            assert r == "nz"
| 26 | 76 | 0.581157 |
8316c900695577c10c9077aa73bf9b92cbfc7c4e | 2,333 | py | Python | brian2/core/namespace.py | mackelab/brian2 | 3a58c8e1ca87f70972aca24c6eeb78c539c190db | [
"BSD-2-Clause"
] | null | null | null | brian2/core/namespace.py | mackelab/brian2 | 3a58c8e1ca87f70972aca24c6eeb78c539c190db | [
"BSD-2-Clause"
] | null | null | null | brian2/core/namespace.py | mackelab/brian2 | 3a58c8e1ca87f70972aca24c6eeb78c539c190db | [
"BSD-2-Clause"
] | null | null | null | """
Implementation of the namespace system, used to resolve the identifiers in
model equations of `NeuronGroup` and `Synapses`
"""
import collections
import inspect
import itertools
from brian2.utils.logger import get_logger
from brian2.units.fundamentalunits import (
standard_unit_register,
additional_unit_register,
)
from brian2.units.stdunits import stdunits
from brian2.core.functions import DEFAULT_FUNCTIONS, DEFAULT_CONSTANTS
__all__ = [
"get_local_namespace",
"DEFAULT_FUNCTIONS",
"DEFAULT_UNITS",
"DEFAULT_CONSTANTS",
]
logger = get_logger(__name__)
def get_local_namespace(level):
"""
Get the surrounding namespace.
Parameters
----------
level : int, optional
How far to go back to get the locals/globals. Each function/method
call should add ``1`` to this argument, functions/method with a
decorator have to add ``2``.
Returns
-------
namespace : dict
The locals and globals at the given depth of the stack frame.
"""
# Get the locals and globals from the stack frame
frame = inspect.currentframe()
for _ in range(level + 1):
frame = frame.f_back
# We return the full stack here, even if it contains a lot of stuff we are
# not interested in -- it is cheaper to later raise an error when we find
# a specific object with an incorrect type instead of going through this big
# list now to check the types of all objects
return dict(itertools.chain(frame.f_globals.items(), frame.f_locals.items()))
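# A minimal sketch (not part of brian2) of the ``level`` convention: with
# ``level=0`` the namespace returned is that of the direct caller, so the
# caller's own locals are visible in the result.
def _example_local_namespace():
    tau = 20  # hypothetical local variable
    return get_local_namespace(level=0)['tau']  # -> 20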
def _get_default_unit_namespace():
"""
Return the namespace that is used by default for looking up units when
defining equations. Contains all registered units and everything from
`brian2.units.stdunits` (ms, mV, nS, etc.).
Returns
-------
namespace : dict
The unit namespace
"""
namespace = collections.OrderedDict(standard_unit_register.units)
namespace.update(stdunits)
# Include all "simple" units from additional_units, i.e. units like mliter
# but not "newton * metre"
namespace.update(
dict(
(name, unit)
for name, unit in additional_unit_register.units.items()
if not unit.iscompound
)
)
return namespace
DEFAULT_UNITS = _get_default_unit_namespace()
| 28.802469 | 81 | 0.690527 |
7a55acf5c15d7d224eb0d8235cce62d38fb40b94 | 592 | py | Python | class/functions.py | lizebang/think-python-2e-examples | 2ba647573d2f2e4d870aaf827044db2035104cb0 | [
"MIT"
] | null | null | null | class/functions.py | lizebang/think-python-2e-examples | 2ba647573d2f2e4d870aaf827044db2035104cb0 | [
"MIT"
] | null | null | null | class/functions.py | lizebang/think-python-2e-examples | 2ba647573d2f2e4d870aaf827044db2035104cb0 | [
"MIT"
] | null | null | null | class Time:
"""Represents the time of day.
attributes: hour, minute, second
"""
# pure functions, modifiers and designed development
# think python 2e -- chapter16
# designed development
def time_to_int(time):
minutes = time.hour * 60 + time.minute
seconds = minutes * 60 + time.second
return seconds
def int_to_time(seconds):
time = Time()
minutes, time.second = divmod(seconds, 60)
time.hour, time.minute = divmod(minutes, 60)
return time
def add_time(t1, t2):
seconds = time_to_int(t1) + time_to_int(t2)
return int_to_time(seconds)
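# A quick check of add_time (example values assumed, matching the book's
# running example): 9:45:00 plus a 1:35:00 duration is 11:20:00.
if __name__ == '__main__':
    start = Time()
    start.hour, start.minute, start.second = 9, 45, 0
    duration = Time()
    duration.hour, duration.minute, duration.second = 1, 35, 0
    done = add_time(start, duration)
    print(done.hour, done.minute, done.second)  # 11 20 0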
| 20.413793 | 52 | 0.679054 |
dcb54906b0ac30a6ee34c2c085d9bb2b5167075a | 6,906 | py | Python | tests/functional_test/Libs/flask/config.py | HopeBayMobile/hcfs | 153666610f42fd6c39f2ca1f1864ebb8652e6b2c | [
"Apache-2.0"
] | null | null | null | tests/functional_test/Libs/flask/config.py | HopeBayMobile/hcfs | 153666610f42fd6c39f2ca1f1864ebb8652e6b2c | [
"Apache-2.0"
] | null | null | null | tests/functional_test/Libs/flask/config.py | HopeBayMobile/hcfs | 153666610f42fd6c39f2ca1f1864ebb8652e6b2c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021 HopeBayTech.
#
# This file is part of Tera.
# See https://github.com/HopeBayMobile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
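# Minimal usage sketch (hypothetical class, not part of Flask): the descriptor
# above proxies attribute access through to ``obj.config``, so reading
# ``_ExampleApp().debug`` returns ``bool(config['DEBUG'])``.
class _ExampleApp(object):
    debug = ConfigAttribute('DEBUG', get_converter=bool)
    def __init__(self):
        self.config = {'DEBUG': 0}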
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| 36.930481 | 79 | 0.648422 |
325914ff90633c2bcd2560b23487ad036b606276 | 80 | py | Python | yolov4-deepsort/func.py | saru-d2/road-tracking-yolov4 | fca383e9ac01db79d6a62c6998e31d6f3b8c60a7 | [
"MIT"
] | null | null | null | yolov4-deepsort/func.py | saru-d2/road-tracking-yolov4 | fca383e9ac01db79d6a62c6998e31d6f3b8c60a7 | [
"MIT"
] | null | null | null | yolov4-deepsort/func.py | saru-d2/road-tracking-yolov4 | fca383e9ac01db79d6a62c6998e31d6f3b8c60a7 | [
"MIT"
] | null | null | null | ''' functions to test out in jupyter notebook '''
def add(a,b):
return a + b | 26.666667 | 49 | 0.6375 |
c412d76773e2004991cbbcf25eea944509788e2b | 1,175 | py | Python | UniNeuroLab/urls.py | AnuTor/UniNeuroLab | 5825f440d4663650f038083f3da05229cc5ada4f | [
"Apache-2.0"
] | null | null | null | UniNeuroLab/urls.py | AnuTor/UniNeuroLab | 5825f440d4663650f038083f3da05229cc5ada4f | [
"Apache-2.0"
] | null | null | null | UniNeuroLab/urls.py | AnuTor/UniNeuroLab | 5825f440d4663650f038083f3da05229cc5ada4f | [
"Apache-2.0"
] | null | null | null | """UniNeuroLab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Each app defines its own ``views`` module; importing them all as ``views``
# would shadow one another, so bind each under a distinct alias.
from gestionTomaDatos import views as toma_datos_views
from gestionProcesamientoDatos import views as procesamiento_datos_views
from gestionProtocolos_y_Estudios import views as protocolos_y_estudios_views
from gestionResultadosProtocolo import views as resultados_protocolo_views
from gestionPlanificacion_y_Desarrollo_Proyectos_Nuevos import views as planificacion_views
from gestionNeuroLab_Panel_de_Control import views as panel_de_control_views
urlpatterns = [
path('admin/', admin.site.urls),
#path('busquedaEstudios/', views.busquedaEstudios),
#path("buscar/",views.buscar),
#path("contacto/", views.contacto),
]
| 37.903226 | 77 | 0.756596 |
065a5c360fddeb82baf076a37c5d3eaf50d5f51e | 2,865 | py | Python | migrations/versions/ecc60e81cf5f_initial_migration.py | Chiuri254/pitch | fd49e9454a636f449fd4f109d4d55c5db5520325 | [
"Unlicense"
] | null | null | null | migrations/versions/ecc60e81cf5f_initial_migration.py | Chiuri254/pitch | fd49e9454a636f449fd4f109d4d55c5db5520325 | [
"Unlicense"
] | null | null | null | migrations/versions/ecc60e81cf5f_initial_migration.py | Chiuri254/pitch | fd49e9454a636f449fd4f109d4d55c5db5520325 | [
"Unlicense"
] | null | null | null | """Initial Migration
Revision ID: ecc60e81cf5f
Revises:
Create Date: 2019-12-02 16:13:00.605474
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ecc60e81cf5f'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.String(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('opinion', sa.String(length=255), nullable=True),
sa.Column('time_posted', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitches_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitches_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('votes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('vote', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitches_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitches_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('votes')
op.drop_table('comments')
op.drop_table('pitches')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('categories')
# ### end Alembic commands ###
| 36.730769 | 76 | 0.664223 |
9089faa22279c209b0635e9447f6b9072ac10e93 | 111 | py | Python | tests/background/task/hard_fails/cannot_be_lambda.py | FundingOptions/background-tasks-python | 3694d3c5bdafaffebbd6c3d55daa976f4d34ba4c | [
"MIT"
] | null | null | null | tests/background/task/hard_fails/cannot_be_lambda.py | FundingOptions/background-tasks-python | 3694d3c5bdafaffebbd6c3d55daa976f4d34ba4c | [
"MIT"
] | null | null | null | tests/background/task/hard_fails/cannot_be_lambda.py | FundingOptions/background-tasks-python | 3694d3c5bdafaffebbd6c3d55daa976f4d34ba4c | [
"MIT"
] | null | null | null | from fops.background import task
# Lambdas are nameless. Tasks must be discoverable.
x = task()(lambda: None)
| 22.2 | 51 | 0.756757 |
24bbac10e9fd2f46bbd9da5257a3a6edf6f633d3 | 4,030 | py | Python | fediplay.py | pwoolcoc/fediplay | 1be7fa445416438fc6229f683ef5e038e315d7b4 | [
"MIT"
] | 1 | 2019-07-19T20:46:56.000Z | 2019-07-19T20:46:56.000Z | fediplay.py | pwoolcoc/fediplay | 1be7fa445416438fc6229f683ef5e038e315d7b4 | [
"MIT"
] | null | null | null | fediplay.py | pwoolcoc/fediplay | 1be7fa445416438fc6229f683ef5e038e315d7b4 | [
"MIT"
] | null | null | null | from os import environ, umask
import shlex
from subprocess import run
from threading import Thread, Lock
from lxml.etree import HTML
import mastodon
Mastodon = mastodon.Mastodon
from youtube_dl import YoutubeDL
class Queue(object):
def __init__(self):
self.lock = Lock()
self.playing = False
self.queue = []
def add(self, url):
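        # The download runs outside the lock so a slow fetch never blocks
        # other queue operations; only the queue mutation below is serialized.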
filename = Getter().get(url)
with self.lock:
self.queue.append(filename)
if not self.playing:
self._play(self.queue.pop(0), self._play_finished)
def _play(self, filename, cb_complete):
self.playing = True
def run_thread(filename, cb_complete):
print('==> Playing', filename)
play_command = build_play_command(filename)
print('[executing]', play_command)
run(play_command, shell=True)
print('==> Playback complete')
cb_complete()
thread = Thread(target=run_thread, args=(filename, cb_complete))
thread.start()
def _play_finished(self):
with self.lock:
self.playing = False
if len(self.queue) > 0:
self._play(self.queue.pop(0), self._play_finished)
class Getter(object):
def _progress_hook(self, progress):
if progress['status'] == 'finished':
self.filename = progress['filename']
def get(self, url):
options = {
'format': 'mp3/mp4',
'nocheckcertificate': 'FEDIPLAY_NO_CHECK_CERTIFICATE' in environ,
'progress_hooks': [self._progress_hook]
}
with YoutubeDL(options) as downloader:
downloader.download([url])
return self.filename
class StreamListener(mastodon.StreamListener):
def __init__(self):
self.queue = Queue()
def on_update(self, status):
tags = extract_tags(status)
if 'fediplay' in tags:
links = extract_links(status)
self.queue.add(links[0])
def register(api_base_url):
old_umask = umask(0o77)
Mastodon.create_app('fediplay', api_base_url=api_base_url, to_file='clientcred.secret')
umask(old_umask)
def login(api_base_url, email, password):
client = Mastodon(client_id='clientcred.secret', api_base_url=api_base_url)
old_umask = umask(0o77)
client.log_in(email, password, to_file='usercred.secret')
umask(old_umask)
def stream(api_base_url):
client = Mastodon(client_id='clientcred.secret', access_token='usercred.secret', api_base_url=api_base_url)
listener = StreamListener()
print('==> Streaming from', api_base_url)
client.stream_user(listener)
def extract_tags(toot):
return [tag['name'] for tag in toot['tags']]
def link_is_internal(link):
classes = link.attrib.get('class', '').split(' ')
if classes:
return 'mention' in classes
return False
def extract_links(toot):
html = HTML(toot['content'])
all_links = html.cssselect('a')
return [link.attrib['href'] for link in all_links if not link_is_internal(link)]
def build_play_command(filename):
escaped_filename = shlex.quote(filename)
template = environ.get(
'FEDIPLAY_PLAY_COMMAND',
'ffplay -v 0 -nostats -hide_banner -autoexit -nodisp {filename}'
)
return template.format(filename=escaped_filename)
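# Example override (assumes mpv is installed; any player template with a
# {filename} placeholder works):
#   export FEDIPLAY_PLAY_COMMAND='mpv --no-video {filename}'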
def main():
from getpass import getpass
from os import path
from sys import exit
api_base_url = environ.get('FEDIPLAY_API_BASE_URL')
if not api_base_url:
print('FEDIPLAY_API_BASE_URL environment variable not set')
exit(1)
if not path.exists('clientcred.secret'):
print('==> No clientcred.secret; registering application')
register(api_base_url)
if not path.exists('usercred.secret'):
print('==> No usercred.secret; logging in')
email = input('Email: ')
password = getpass('Password: ')
login(api_base_url, email, password)
stream(api_base_url)
if __name__ == '__main__':
main()
| 29.851852 | 111 | 0.647146 |
971a73338733cb6eddc82321f4e678558291f729 | 992 | py | Python | moex/migrations/0018_auto_20200704_1120.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 2 | 2020-09-08T12:51:56.000Z | 2021-08-18T15:27:52.000Z | moex/migrations/0018_auto_20200704_1120.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 1 | 2021-12-13T20:43:35.000Z | 2021-12-13T20:43:35.000Z | moex/migrations/0018_auto_20200704_1120.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-07-04 08:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moex', '0017_auto_20200704_1115'),
]
operations = [
migrations.AlterField(
model_name='security',
name='code',
field=models.CharField(blank=True, max_length=30, null=True, unique=True),
),
migrations.AlterField(
model_name='security',
name='isin',
field=models.CharField(blank=True, max_length=30, null=True, unique=True),
),
migrations.AlterField(
model_name='security',
name='regnumber',
field=models.CharField(blank=True, max_length=30, null=True, unique=True),
),
migrations.AlterField(
model_name='security',
name='secid',
field=models.CharField(blank=True, max_length=30, null=True, unique=True),
),
]
| 29.176471 | 86 | 0.580645 |
a2f2e0a91a0e0e8420713a7cb701ac13842cd476 | 22,080 | py | Python | tensorflow_quantum/python/differentiators/gradient_test.py | quantummind/quantum | fd952d0362c5445eef0da4437fb3e5ebb16b7948 | [
"Apache-2.0"
] | 1,501 | 2020-03-09T00:40:31.000Z | 2022-03-28T19:59:57.000Z | tensorflow_quantum/python/differentiators/gradient_test.py | quantummind/quantum | fd952d0362c5445eef0da4437fb3e5ebb16b7948 | [
"Apache-2.0"
] | 381 | 2020-03-09T18:31:04.000Z | 2022-03-28T18:47:32.000Z | tensorflow_quantum/python/differentiators/gradient_test.py | quantummind/quantum | fd952d0362c5445eef0da4437fb3e5ebb16b7948 | [
"Apache-2.0"
] | 410 | 2020-03-09T03:05:48.000Z | 2022-03-31T12:08:14.000Z | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing for gradient calculation consistency in TFQ."""
import copy
import numpy as np
import sympy
import tensorflow as tf
from absl.testing import parameterized
import cirq
from tensorflow_quantum.python import util
from tensorflow_quantum.python.differentiators import adjoint
from tensorflow_quantum.python.differentiators import linear_combination
from tensorflow_quantum.python.differentiators import parameter_shift
from tensorflow_quantum.core.ops import circuit_execution_ops, batch_util
from tensorflow_quantum.core.ops.noise import noisy_expectation_op
from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op
ANALYTIC_DIFFS = [
linear_combination.ForwardDifference(grid_spacing=0.0001),
linear_combination.ForwardDifference(error_order=2, grid_spacing=0.0001),
linear_combination.CentralDifference(grid_spacing=0.0001),
linear_combination.CentralDifference(error_order=4, grid_spacing=0.0001),
parameter_shift.ParameterShift(),
]
SAMPLED_DIFFS = [
linear_combination.ForwardDifference(grid_spacing=0.05),
linear_combination.CentralDifference(grid_spacing=0.05),
parameter_shift.ParameterShift(),
]
SAMPLED_DIFFS_TOLS = [0.5, 0.5, 0.2]
ANALYTIC_OPS = [
circuit_execution_ops.get_expectation_op(cirq.sim.Simulator()), # WF
circuit_execution_ops.get_expectation_op() # C++
]
SAMPLED_OPS = [
circuit_execution_ops.get_sampled_expectation_op(
cirq.sim.Simulator()), # WF
circuit_execution_ops.get_sampled_expectation_op() # C++
]
NOISY_OPS = [
noisy_sampled_expectation_op.sampled_expectation,
noisy_expectation_op.expectation
]
def _cirq_simple_finite_difference(circuit_batch,
resolvers,
symbol_names,
op_batch,
simulator,
grid_spacing=0.0001):
"""A simple finite difference code that calculates the gradient of a
batch of circuits using cirq."""
init_vals = batch_util.batch_calculate_expectation(circuit_batch, resolvers,
op_batch, simulator)
grad_circuits = []
grad_resolvers = []
grad_pauli_sums = []
for this_program, this_pauli_sums, this_resolver in \
zip(circuit_batch, op_batch, resolvers):
for symbol in symbol_names:
perturbed_resolver = copy.deepcopy(this_resolver)
perturbed_resolver.param_dict[symbol] += grid_spacing
grad_circuits.append(this_program)
grad_pauli_sums.append(this_pauli_sums)
grad_resolvers.append(perturbed_resolver)
# shape: [n_programs * len(symbol_names), n_pauli_sums]
results = np.array(
batch_util.batch_calculate_expectation(circuits=grad_circuits,
param_resolvers=grad_resolvers,
ops=grad_pauli_sums,
simulator=simulator))
# shape: [n_pauli_sums, n_programs, len(symbol_names)]
gradient_generator = results.transpose().reshape(
(len(op_batch[0]), len(circuit_batch), len(symbol_names)))
# shape: [n_pauli_sums, n_programs, len(symbol_names)]
forward_pass_vals = np.transpose(
np.vstack([np.expand_dims(init_vals, axis=0)] * len(symbol_names)),
(2, 1, 0))
return np.sum(1 / grid_spacing * (gradient_generator - forward_pass_vals),
axis=0)
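# The estimate above is the standard forward difference
#   d<op>/d(symbol) ~= (<op>(symbol + h) - <op>(symbol)) / h,  h = grid_spacing,
# computed per (program, symbol) pair and summed over the pauli-sum axis.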
class AnalyticGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
"""Test correctness of the differentiators to reference cirq algorithm."""
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'differentiator': ANALYTIC_DIFFS,
'op': ANALYTIC_OPS
})) + [{
'differentiator': adjoint.Adjoint(),
'op': circuit_execution_ops.get_expectation_op()
}])
def test_backprop(self, differentiator, op):
"""Test that gradients are correctly backpropagated through a quantum
circuit via comparison to analytical results.
"""
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
def exact_grad(theta):
new_theta = 2 * np.pi * theta
return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta))
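        # exact_grad is the analytic derivative of the forward pass built
        # below: <Z> after X**rx is cos(pi * rx), and with rx = 2 * theta the
        # op computes exp(cos(2 * pi * theta)); differentiating w.r.t. theta
        # gives the expression above.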
bit = cirq.GridQubit(0, 0)
circuits = util.convert_to_tensor(
[cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)])
pstring = util.convert_to_tensor([[
cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})])
] for _ in circuits])
base_rot_angles = tf.constant([[0.25], [0.125]])
with tf.GradientTape() as g:
g.watch(base_rot_angles)
input_angles = 2 * base_rot_angles
exp_res = tf.exp(
op(circuits, tf.convert_to_tensor(['rx']), input_angles,
pstring))
grad = g.gradient(exp_res, base_rot_angles)
exact = [[exact_grad(0.25)], [exact_grad(0.125)]]
# will this be too tight? time will tell.
self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': ANALYTIC_DIFFS,
'op': ANALYTIC_OPS,
'n_qubits': [5],
'n_programs': [3],
'n_ops': [3],
'symbol_names': [['a', 'b']]
})) + [{
'differentiator': adjoint.Adjoint(),
'op': circuit_execution_ops.get_expectation_op(),
'n_qubits': 10,
'n_programs': 5,
'n_ops': 3,
'symbol_names': ['a', 'b']
}])
def test_gradients_vs_cirq_finite_difference(self, differentiator, op,
n_qubits, n_programs, n_ops,
symbol_names):
"""Compare TFQ differentiators to fine-grained noiseless cirq finite
differencing.
"""
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
psums = [
util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch],
dtype=np.float32)
# calculate tfq gradient
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
programs = util.convert_to_tensor(circuit_batch)
ops = util.convert_to_tensor(psums)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(programs, tf.convert_to_tensor(symbol_names),
symbol_values_tensor, ops)
tfq_grads = g.gradient(expectations, symbol_values_tensor)
# calculate gradients in cirq using a very simple forward differencing
# scheme
cirq_grads = _cirq_simple_finite_difference(circuit_batch,
resolver_batch,
symbol_names, psums,
cirq.Simulator())
# will this be too tight? time will tell.
self.assertAllClose(cirq_grads, tfq_grads, rtol=2e-2, atol=2e-2)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'differentiator': ANALYTIC_DIFFS,
'op': ANALYTIC_OPS,
})) + [{
'differentiator': adjoint.Adjoint(),
'op': circuit_execution_ops.get_expectation_op(),
}])
def test_analytic_value_with_simple_circuit(self, differentiator, op):
"""Test the value of differentiator with simple circuit."""
# Get an expectation op, with this differentiator attached.
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
qubit = cirq.GridQubit(0, 0)
circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
psums = util.convert_to_tensor([[cirq.Z(qubit)]])
symbol_values_array = np.array([[0.123]], dtype=np.float32)
# Calculate tfq gradient.
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, tf.convert_to_tensor(['alpha']),
symbol_values_tensor, psums)
grads = g.gradient(expectations, symbol_values_tensor)
ground_truth_grads = np.array([[-1.1839752]])
self.assertAllClose(ground_truth_grads, grads, rtol=1e-2, atol=1e-2)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'differentiator': ANALYTIC_DIFFS,
'op': ANALYTIC_OPS,
})) + [{
'differentiator': adjoint.Adjoint(),
'op': circuit_execution_ops.get_expectation_op(),
}])
def test_empty_circuit_grad(self, differentiator, op):
"""Test that providing no circuits will fail gracefully."""
differentiator.refresh()
op = differentiator.generate_differentiable_op(analytic_op=op)
circuit = tf.convert_to_tensor([], dtype=tf.string)
psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
# Calculate tfq gradient.
symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, symbol_names_tensor,
symbol_values_tensor, psums)
grads = g.gradient(expectations, symbol_values_tensor)
self.assertShapeEqual(grads.numpy(),
tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))
class SampledGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
"""Test approximate correctness to sampled methods."""
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': SAMPLED_DIFFS,
'op': SAMPLED_OPS,
'num_samples': [20000]
})))
def test_sampled_value_with_simple_circuit(self, differentiator, op,
num_samples):
"""Test the value of sampled differentiator with simple circuit."""
# Get an expectation op, with this differentiator attached.
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
qubit = cirq.GridQubit(0, 0)
circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
psums = util.convert_to_tensor([[cirq.Z(qubit)]])
symbol_values_array = np.array([[0.123]], dtype=np.float32)
# Calculate tfq gradient.
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, tf.convert_to_tensor(['alpha']),
symbol_values_tensor, psums,
tf.convert_to_tensor([[num_samples]]))
grads = g.gradient(expectations, symbol_values_tensor)
ground_truth_grads = np.array([[-1.1839752]])
self.assertAllClose(ground_truth_grads, grads, rtol=0.2, atol=0.2)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'diff_and_tol': zip(SAMPLED_DIFFS, SAMPLED_DIFFS_TOLS),
'op': SAMPLED_OPS,
'n_qubits': [3],
'n_programs': [5],
'n_ops': [2],
'symbol_names': [['a', 'b']],
'num_samples': [30000]
})))
def test_approx_equality_shallow(self, diff_and_tol, op, n_qubits,
symbol_names, n_ops, n_programs,
num_samples):
"""Test small circuits with limited depth."""
differentiator, tol = diff_and_tol
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
# Prepare random pauli sums and add initial superposition gates.
psums = []
for i in range(len(circuit_batch)):
psums.append(util.random_pauli_sums(qubits, 1, n_ops))
circuit_batch[i] = cirq.Circuit(
cirq.H.on_each(qubits)) + circuit_batch[i]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch],
dtype=np.float32)
# calculate tfq gradient
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
programs = util.convert_to_tensor(circuit_batch)
ops = util.convert_to_tensor(psums)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(
programs, tf.convert_to_tensor(symbol_names),
symbol_values_tensor, ops,
tf.convert_to_tensor([[num_samples] * n_ops] * n_programs))
tfq_grads = g.gradient(expectations, symbol_values_tensor)
# calculate gradients in cirq using a very simple forward differencing
# scheme
cirq_grads = _cirq_simple_finite_difference(circuit_batch,
resolver_batch,
symbol_names, psums,
cirq.Simulator())
self.assertAllClose(cirq_grads, tfq_grads, rtol=tol, atol=tol)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'differentiator': SAMPLED_DIFFS,
'op': SAMPLED_OPS,
})))
def test_empty_circuit_sampled_grad(self, differentiator, op):
"""Test that providing no circuits will fail gracefully."""
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
circuit = tf.convert_to_tensor([], dtype=tf.string)
psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
# Calculate tfq gradient.
symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)
n_samples_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, symbol_names_tensor,
symbol_values_tensor, psums, n_samples_tensor)
grads = g.gradient(expectations, symbol_values_tensor)
self.assertShapeEqual(grads.numpy(),
tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))
class NoisyGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase):
"""Test approximate correctness of noisy methods."""
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'differentiator': SAMPLED_DIFFS,
'op': NOISY_OPS,
'num_samples': [20000]
})))
def test_sampled_value_with_simple_circuit(self, differentiator, op,
num_samples):
"""Test the value of sampled differentiator with simple circuit."""
# Get an expectation op, with this differentiator attached.
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
qubit = cirq.GridQubit(0, 0)
circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
psums = util.convert_to_tensor([[cirq.Z(qubit)]])
symbol_values_array = np.array([[0.123]], dtype=np.float32)
# Calculate tfq gradient.
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, tf.convert_to_tensor(['alpha']),
symbol_values_tensor, psums,
tf.convert_to_tensor([[num_samples]]))
grads = g.gradient(expectations, symbol_values_tensor)
ground_truth_grads = np.array([[-1.1839752]])
self.assertAllClose(ground_truth_grads, grads, rtol=0.2, atol=0.2)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'diff_and_tol': zip(SAMPLED_DIFFS, SAMPLED_DIFFS_TOLS),
'op': NOISY_OPS,
'n_qubits': [5],
'n_programs': [5],
'n_ops': [2],
'symbol_names': [['a', 'b']],
'num_samples': [30000]
})))
def test_approx_equality_shallow(self, diff_and_tol, op, n_qubits,
symbol_names, n_ops, n_programs,
num_samples):
"""Test small circuits with limited depth."""
differentiator, tol = diff_and_tol
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs,
include_channels=True)
# Prepare random pauli sums and add initial superposition gates.
psums = []
for i in range(len(circuit_batch)):
psums.append(util.random_pauli_sums(qubits, 1, n_ops))
circuit_batch[i] = cirq.Circuit(
cirq.H.on_each(qubits)) + circuit_batch[i]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch],
dtype=np.float32)
# calculate tfq gradient
symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
programs = util.convert_to_tensor(circuit_batch)
ops = util.convert_to_tensor(psums)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(
programs, tf.convert_to_tensor(symbol_names),
symbol_values_tensor, ops,
tf.convert_to_tensor([[num_samples] * n_ops] * n_programs))
tfq_grads = g.gradient(expectations, symbol_values_tensor)
cirq_grads = _cirq_simple_finite_difference(
circuit_batch, resolver_batch, symbol_names, psums,
cirq.DensityMatrixSimulator())
self.assertAllClose(cirq_grads, tfq_grads, rtol=tol, atol=tol)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'differentiator': SAMPLED_DIFFS,
'op': NOISY_OPS,
})))
def test_empty_circuit_sampled_grad(self, differentiator, op):
"""Test that providing no circuits will fail gracefully."""
differentiator.refresh()
op = differentiator.generate_differentiable_op(sampled_op=op)
circuit = tf.convert_to_tensor([], dtype=tf.string)
psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
# Calculate tfq gradient.
symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)
n_samples_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
with tf.GradientTape() as g:
g.watch(symbol_values_tensor)
expectations = op(circuit, symbol_names_tensor,
symbol_values_tensor, psums, n_samples_tensor)
grads = g.gradient(expectations, symbol_values_tensor)
self.assertShapeEqual(grads.numpy(),
tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))
if __name__ == '__main__':
tf.test.main()
| 43.636364 | 80 | 0.603034 |
b16ec656e69fa3d0df0f9887ace3bc9112387235 | 7,525 | py | Python | eventtools/tests/_fixture.py | ixc/glamkit-eventtools | f94726c145f52bb7771b1c5352a39903d5fa33f3 | [
"BSD-3-Clause"
] | 9 | 2015-03-02T21:14:53.000Z | 2019-10-28T09:08:38.000Z | eventtools/tests/_fixture.py | ixc/glamkit-eventtools | f94726c145f52bb7771b1c5352a39903d5fa33f3 | [
"BSD-3-Clause"
] | null | null | null | eventtools/tests/_fixture.py | ixc/glamkit-eventtools | f94726c145f52bb7771b1c5352a39903d5fa33f3 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T19:05:28.000Z | 2019-12-09T19:05:28.000Z | from dateutil.relativedelta import *
from eventtools.models import Rule
from eventtools_testapp.models import *
from eventtools.utils.dateranges import *
from datetime import datetime, date, timedelta
def fixture(obj):
#some simple events
obj.talk = ExampleEvent.eventobjects.create(title="Curator's Talk", slug="curators-talk")
obj.performance = ExampleEvent.eventobjects.create(title="A performance", slug="performance")
#some useful dates
obj.day1 = date(2010,10,10)
obj.day2 = obj.day1+timedelta(1)
#some simple occurrences
obj.talk_morning = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,10,10,00))
obj.talk_afternoon = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,10,14,00))
obj.talk_tomorrow_morning_cancelled = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,11,10,00), status='cancelled')
obj.performance_evening = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,10,20,00))
obj.performance_tomorrow = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,11,20,00))
obj.performance_day_after_tomorrow = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,12,20,00))
#an event with many occurrences
# deleting the 2nd jan, because we want to test it isn't displayed
obj.daily_tour = ExampleEvent.eventobjects.create(title="Daily Tour", slug="daily-tour")
for day in range(50):
if day !=1: #2nd of month.
d = date(2010,1,1) + timedelta(day)
obj.daily_tour.occurrences.create(start=d)
obj.weekly_talk = ExampleEvent.eventobjects.create(title="Weekly Talk", slug="weekly-talk")
for day in range(50):
d = date(2010,1,1) + timedelta(day*7)
obj.weekly_talk.occurrences.create(start=datetime.combine(d, time(10,00)), _duration=240)
#an event with some variations
obj.film = ExampleEvent.eventobjects.create(title="Film Night", slug="film-night")
obj.film_with_popcorn = ExampleEvent.eventobjects.create(parent=obj.film, title="Film Night", slug="film-night-2", difference_from_parent="free popcorn")
obj.film_with_talk = ExampleEvent.eventobjects.create(parent=obj.film, title="Film Night", slug="film-night-talk", difference_from_parent="director's talk")
obj.film_with_talk_and_popcorn = ExampleEvent.eventobjects.create(parent=obj.film_with_talk, title="Film Night", slug="film-with-talk-and-popcorn", difference_from_parent="popcorn and director's talk")
# obj.film_with_popcorn.move_to(obj.film, position='first-child')
# obj.film_with_talk.move_to(obj.film, position='first-child')
# obj.film_with_talk_and_popcorn.move_to(obj.film_with_talk, position='first-child')
# the mptt gotcha. reload the parents
reload_films(obj)
obj.film_occ = obj.film.occurrences.create(start=datetime(2010,10,10,18,30))
obj.film_occ.save()
obj.film_with_popcorn_occ = obj.film_with_popcorn.occurrences.create(start=datetime(2010,10,11,18,30))
obj.film_with_talk_occ = obj.film_with_talk.occurrences.create(start=datetime(2010,10,12,18,30))
obj.film_with_talk_and_popcorn_occ = obj.film_with_talk_and_popcorn.occurrences.create(start=datetime(2010,10,13,18,30))
def generator_fixture(obj):
#TestEvents with generators (separate models to test well)
obj.weekly = Rule.objects.create(frequency = "WEEKLY")
obj.daily = Rule.objects.create(frequency = "DAILY")
obj.yearly = Rule.objects.create(frequency = "YEARLY")
obj.bin_night = ExampleEvent.eventobjects.create(title='Bin Night')
obj.weekly_generator = obj.bin_night.generators.create(start=datetime(2010,1,8,10,30), _duration=60, rule=obj.weekly, repeat_until=date(2010,2,5))
#this should create 0 occurrences, since it is a duplicate of weekly.
obj.dupe_weekly_generator = obj.bin_night.generators.create(start=datetime(2010,1,8,10,30), _duration=60, rule=obj.weekly, repeat_until=date(2010,2,5))
obj.endless_generator = obj.bin_night.generators.create(start=datetime(2010,1,2,10,30), _duration=60, rule=obj.weekly)
obj.all_day_generator = obj.bin_night.generators.create(start=datetime(2010,1,4,0,0), rule=obj.weekly, repeat_until=date(2010,1,25))
def reload_films(obj):
obj.film = obj.film.reload()
obj.film_with_popcorn = obj.film_with_popcorn.reload()
obj.film_with_talk = obj.film_with_talk.reload()
obj.film_with_talk_and_popcorn = obj.film_with_talk_and_popcorn.reload()
def bigfixture(obj):
# have to create some more events since we are working from 'today'.
obj.pe = ExampleEvent.eventobjects.create(title="proliferating event")
obj.todaynow = datetime.now()
obj.today = date.today()
obj.tomorrow = obj.today + timedelta(1)
obj.yesterday = obj.today - timedelta(1)
obj.this_week = dates_in_week_of(obj.today)
obj.last_week = dates_in_week_of(obj.today-timedelta(7))
obj.next_week = dates_in_week_of(obj.today+timedelta(7))
obj.this_weekend = dates_in_weekend_of(obj.today)
obj.last_weekend = dates_in_weekend_of(obj.today-timedelta(7))
obj.next_weekend = dates_in_weekend_of(obj.today+timedelta(7))
obj.this_fortnight = dates_in_fortnight_of(obj.today)
obj.last_fortnight = dates_in_fortnight_of(obj.today-timedelta(14))
obj.next_fortnight = dates_in_fortnight_of(obj.today+timedelta(14))
obj.this_month = dates_in_month_of(obj.today)
obj.last_month = dates_in_month_of(obj.today+relativedelta(months=-1))
obj.next_month = dates_in_month_of(obj.today+relativedelta(months=+1))
obj.this_year = dates_in_year_of(obj.today)
obj.last_year = dates_in_year_of(obj.today+relativedelta(years=-1))
obj.next_year = dates_in_year_of(obj.today+relativedelta(years=+1))
obj.now = datetime.now().time()
obj.hence1 = (datetime.now() + timedelta(seconds=600)).time()
obj.hence2 = (datetime.now() + timedelta(seconds=1200)).time()
obj.earlier1 = (datetime.now() - timedelta(seconds=600)).time()
obj.earlier2 = (datetime.now() - timedelta(seconds=1200)).time()
#on each of the given days, we'll create 5 occurrences:
# all day
# earlier
# hence
# current
# multiday
present_days = \
obj.this_week + \
obj.this_weekend + \
obj.this_fortnight + \
obj.this_month + \
obj.this_year + \
[obj.today]
past_days = \
obj.last_week + \
obj.last_weekend + \
obj.last_fortnight + \
obj.last_month + \
obj.last_year + \
[obj.yesterday]
future_days = \
obj.next_week + \
obj.next_weekend + \
obj.next_fortnight + \
obj.next_month + \
obj.next_year + \
[obj.tomorrow]
for day in present_days + past_days + future_days:
#all day
obj.pe.occurrences.create(start=day)
# earlier
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier2), end=datetime.combine(day, obj.earlier1))
# later
obj.pe.occurrences.create(start=datetime.combine(day, obj.hence1), end=datetime.combine(day, obj.hence2))
# now-ish
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier1), end=datetime.combine(day, obj.hence1))
# multiday
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier1), end=datetime.combine(day+timedelta(1), obj.hence1)) | 47.929936 | 205 | 0.714684 |
35e1b0fa1312099074053aed4258d9dbefa0f0ad | 4,510 | py | Python | flask_sqlalchemy_bundle/meta/base_model_metaclass.py | briancappello/flask-sqlalchemy-bundle | 8150896787907ef0001839b5a6ef303edccb9b6c | [
"MIT"
] | null | null | null | flask_sqlalchemy_bundle/meta/base_model_metaclass.py | briancappello/flask-sqlalchemy-bundle | 8150896787907ef0001839b5a6ef303edccb9b6c | [
"MIT"
] | null | null | null | flask_sqlalchemy_bundle/meta/base_model_metaclass.py | briancappello/flask-sqlalchemy-bundle | 8150896787907ef0001839b5a6ef303edccb9b6c | [
"MIT"
] | null | null | null | import re
from collections import defaultdict
from flask_sqlalchemy.model import DefaultMeta, should_set_tablename
from flask_unchained.string_utils import snake_case
from flask_unchained.utils import deep_getattr
from sqlalchemy import Column
from .model_meta_factory import ModelMetaFactory
from .model_registry import _model_registry
from .types import McsArgs, McsInitArgs
VALIDATOR_RE = re.compile(r'^validates?_(?P<column>\w+)')
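# Matches validator method names such as "validate_email" or "validates_email",
# capturing the column name ("email") that the validator applies to.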
class BaseModelMetaclass(DefaultMeta):
def __new__(mcs, name, bases, clsdict):
mcs_args = McsArgs(mcs, name, bases, clsdict)
_model_registry._ensure_correct_base_model(mcs_args)
ModelMetaFactoryClass = deep_getattr(
clsdict, mcs_args.bases, '_meta_factory_class', ModelMetaFactory)
model_meta_factory: ModelMetaFactory = ModelMetaFactoryClass()
model_meta_factory._contribute_to_class(mcs_args)
if model_meta_factory.abstract:
return super().__new__(*mcs_args)
validators = deep_getattr(clsdict, mcs_args.bases, '__validators__',
defaultdict(list))
columns = {col_name: col for col_name, col in clsdict.items()
if isinstance(col, Column)}
for col_name, col in columns.items():
if not col.name:
col.name = col_name
if col.info:
for v in col.info.get('validators', []):
if v not in validators[col_name]:
validators[col_name].append(v)
for attr_name, attr in clsdict.items():
validates = getattr(attr, '__validates__', None)
if validates and deep_getattr(clsdict, mcs_args.bases, validates):
if attr_name not in validators[attr.__validates__]:
validators[attr.__validates__].append(attr_name)
continue
m = VALIDATOR_RE.match(attr_name)
column = m.groupdict()['column'] if m else None
if m and deep_getattr(clsdict, mcs_args.bases, column, None) is not None:
attr.__validates__ = column
if attr_name not in validators[column]:
validators[column].append(attr_name)
clsdict['__validators__'] = validators
_model_registry.register_new(mcs_args)
return super().__new__(*mcs_args)
def __init__(cls, name, bases, clsdict):
# for some as-yet-not-understood reason, the arguments python passes
# to __init__ do not match those we gave to __new__ (namely, the
# bases parameter passed to __init__ is what the class was declared
# with, instead of the new bases the model_registry determined it
# should have. and in fact, __new__ does the right thing - it uses
# the correct bases, and the generated class has the correct bases,
# yet still, the ones passed to __init__ are wrong. however at this
# point (inside __init__), because the class has already been
# constructed, changing the bases argument doesn't seem to have any
# effect (and that agrees with what conceptually should be the case).
# Sooo, we're passing the correct arguments up the chain, to reduce
# confusion, just in case anybody needs to inspect them)
_, name, bases, clsdict = cls._meta._mcs_args
if cls._meta.abstract:
super().__init__(name, bases, clsdict)
if should_set_tablename(cls):
cls.__tablename__ = snake_case(cls.__name__)
if not cls._meta.abstract and not cls._meta.lazy_mapped:
cls._pre_mcs_init()
super().__init__(name, bases, clsdict)
cls._post_mcs_init()
if not cls._meta.abstract:
_model_registry.register(McsInitArgs(cls, name, bases, clsdict))
def _pre_mcs_init(cls):
"""
Callback for BaseModelMetaclass subclasses to run code just before a
concrete Model class gets registered with SQLAlchemy.
This is intended to be used for advanced meta options implementations.
"""
# technically you could also put a @classmethod with the same name on
# the Model class, if you prefer that approach
def _post_mcs_init(cls):
"""
Callback for BaseModelMetaclass subclasses to run code just after a
concrete Model class gets registered with SQLAlchemy.
This is intended to be used for advanced meta options implementations.
"""
| 42.952381 | 85 | 0.660976 |
bcc717c42fb63746c480096b4162c8e84a132500 | 752 | py | Python | game_finder/migrations/0002_auto_20171117_1833.py | stuart-bradley/steam_lan_game_finder | 345cc02b64da7269887dcce7014ddb5d5b6194bd | [
"MIT"
] | 1 | 2018-01-09T11:44:54.000Z | 2018-01-09T11:44:54.000Z | game_finder/migrations/0002_auto_20171117_1833.py | lutrasdebtra/steam_lan_game_finder | 345cc02b64da7269887dcce7014ddb5d5b6194bd | [
"MIT"
] | 1 | 2020-04-26T19:52:31.000Z | 2020-04-26T19:52:31.000Z | game_finder/migrations/0002_auto_20171117_1833.py | lutrasdebtra/steam_lan_game_finder | 345cc02b64da7269887dcce7014ddb5d5b6194bd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-17 05:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('game_finder', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='game',
name='created_date',
field=models.DateTimeField(auto_now_add=True,
default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='game',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
]
| 26.857143 | 74 | 0.586436 |
588f1e71d7c21acfc61f5a892a0d8b682a977ae1 | 4,092 | py | Python | type_classgen.py | rombie/contrail-generateDS | f8666e9b5ffe1502830197d31b645dd83e47b754 | [
"MIT"
] | null | null | null | type_classgen.py | rombie/contrail-generateDS | f8666e9b5ffe1502830197d31b645dd83e47b754 | [
"MIT"
] | null | null | null | type_classgen.py | rombie/contrail-generateDS | f8666e9b5ffe1502830197d31b645dd83e47b754 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
class TypeClassGenerator(object):
def __init__(self, cTypeDict):
self._cTypeDict = cTypeDict
self._generated_types = { }
def _GenerateTypeSub(self, file, ctype):
depend_types = ctype.getDependentTypes()
for child in depend_types:
if not child.getName() in self._generated_types:
self._generated_types[child.getName()] = child
self._GenerateTypeSub(file, child)
file.write('\nstruct %s : public AutogenProperty {\n' % ctype.getName())
file.write(' virtual ~%s();\n' % ctype.getName())
members = ctype.getDataMembers()
if len(members) == 1 and members[0].isSequence:
member = members[0]
cdecl = """
typedef %(vectype)s::const_iterator const_iterator;
const_iterator begin() const { return %(membername)s.begin(); }
const_iterator end() const { return %(membername)s.end(); }
""" % {'vectype': member.ctypename, 'membername': member.membername}
file.write(cdecl)
for member in ctype.getDataMembers():
file.write(' %s %s;\n' % (member.ctypename, member.membername))
tail = """
void Clear();
void Copy(const %s &rhs);
bool XmlParse(const pugi::xml_node &node);
static bool XmlParseProperty(const pugi::xml_node &node,
std::auto_ptr<AutogenProperty> *resultp);
void Encode(pugi::xml_node *node) const;
};
""" % (ctype.getName())
file.write(tail)
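    # Illustrative output (hypothetical type): for a ctype "FooType" whose
    # single data member is a sequence "std::vector<int> foo;", the code
    # above emits roughly:
    #
    #   struct FooType : public AutogenProperty {
    #       virtual ~FooType();
    #       typedef std::vector<int>::const_iterator const_iterator;
    #       const_iterator begin() const { return foo.begin(); }
    #       const_iterator end() const { return foo.end(); }
    #       std::vector<int> foo;
    #       void Clear();
    #       void Copy(const FooType &rhs);
    #       ...  // XmlParse/XmlParseProperty/Encode declarations
    #   };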
def GenerateType(self, file, ctype):
if not ctype.getName() in self._generated_types:
self._generated_types[ctype.getName()] = ctype
self._GenerateTypeSub(file, ctype)
def Generate(self, file, ctype):
header = """
// autogenerated file --- DO NOT EDIT ---
#include <iostream>
#include <string.h>
#include <vector>
#include <boost/dynamic_bitset.hpp>
namespace pugi {
class xml_node;
class xml_document;
} // namespace pugi
#include "ifmap/autogen.h"
namespace autogen {
"""
file.write(header)
self.GenerateType(file, ctype)
file.write('} // namespace autogen\n')
pass
class TypeImplGenerator(object):
def __init__(self, cTypeDict):
self._cTypeDict = cTypeDict
pass
def GenerateType(self, file, ctype):
destruct = """
%(class)s::~%(class)s() {
}
""" % {'class': ctype.getName()}
file.write(destruct)
cleardef = """
void %s::Clear() {
""" % ctype.getName()
file.write(cleardef)
for member in ctype.getDataMembers():
cpptype = member.ctypename
if (cpptype == 'int' or
cpptype == 'uint64_t' or
cpptype == 'time_t'):
file.write(' %s = 0;\n' % member.membername)
elif cpptype == 'bool':
file.write(' %s = false;\n' % member.membername)
elif member.isComplex and not member.isSequence:
file.write(' %s.Clear();\n' % member.membername)
else:
file.write(' %s.clear();\n' % member.membername)
file.write('};\n')
copydef = """
void %s::Copy(const %s &rhs) {
""" % (ctype.getName(), ctype.getName())
file.write(copydef)
for member in ctype.getDataMembers():
cpptype = member.ctypename
if member.isComplex and not member.isSequence:
fmt = ' %s.Copy(rhs.%s);\n'
else:
fmt = ' %s = rhs.%s;\n'
file.write(fmt % (member.membername, member.membername))
file.write('};\n')
def Generate(self, hdrname, file):
header = """
// autogenerated file --- DO NOT EDIT ---
#include "%s"
#include <boost/bind.hpp>
#include "ifmap/autogen.h"
#include <pugixml/pugixml.hpp>
using namespace std;
namespace autogen {
""" % hdrname
file.write(header)
for ctype in self._cTypeDict.values():
self.GenerateType(file, ctype)
file.write('}\n')
| 30.311111 | 80 | 0.57869 |
7071cb9b546f67ba34702f2cde10f5776e640e8e | 457 | py | Python | xendit/models/ewallet/ovo/ovo_payment_status.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | [
"MIT"
] | 10 | 2020-10-31T23:34:34.000Z | 2022-03-08T19:08:55.000Z | xendit/models/ewallet/ovo/ovo_payment_status.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | [
"MIT"
] | 22 | 2020-07-30T14:25:07.000Z | 2022-03-31T03:55:46.000Z | xendit/models/ewallet/ovo/ovo_payment_status.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | [
"MIT"
] | 11 | 2020-07-28T08:09:40.000Z | 2022-03-18T00:14:02.000Z | from xendit.models._base_model import BaseModel
class OVOPaymentStatus(BaseModel):
"""Payment Status for OVO (API Reference: eWallets)
Attributes:
- amount (str)
- business_id (str)
- ewallet_type (str)
- external_id (str)
- status (str)
- transaction_date (str) (ISO 8601 Date)
"""
amount: str
business_id: str
ewallet_type: str
external_id: str
status: str
transaction_date: str
| 20.772727 | 55 | 0.63895 |
02b192f487778ba1cc6a465d613c339554afe365 | 18,202 | py | Python | Tools/ccbench/ccbench.py | cocoatomo/Python3.2_C_API_Tutorial | e33d4a285429935aca3178dc2a97aca3ab484232 | [
"PSF-2.0"
] | 2 | 2019-03-03T00:04:36.000Z | 2020-10-06T16:22:38.000Z | Tools/ccbench/ccbench.py | cocoatomo/Python3.2_C_API_Tutorial | e33d4a285429935aca3178dc2a97aca3ab484232 | [
"PSF-2.0"
] | null | null | null | Tools/ccbench/ccbench.py | cocoatomo/Python3.2_C_API_Tutorial | e33d4a285429935aca3178dc2a97aca3ab484232 | [
"PSF-2.0"
] | 1 | 2019-03-03T00:04:38.000Z | 2019-03-03T00:04:38.000Z | # This file should be kept compatible with both Python 2.6 and Python >= 3.0.
from __future__ import division
from __future__ import print_function
"""
ccbench, a Python concurrency benchmark.
"""
import time
import os
import sys
import functools
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
# Compatibility
try:
xrange
except NameError:
xrange = range
try:
map = itertools.imap
except AttributeError:
pass
THROUGHPUT_DURATION = 2.0
LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0
BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0
def task_pidigits():
"""Pi calculation (Python)"""
_map = map
_count = itertools.count
_islice = itertools.islice
def calc_ndigits(n):
# From http://shootout.alioth.debian.org/
def gen_x():
return _map(lambda k: (k, 4*k + 2, 0, 2*k + 1), _count(1))
def compose(a, b):
aq, ar, as_, at = a
bq, br, bs, bt = b
return (aq * bq,
aq * br + ar * bt,
as_ * bq + at * bs,
as_ * br + at * bt)
def extract(z, j):
q, r, s, t = z
return (q*j + r) // (s*j + t)
def pi_digits():
z = (1, 0, 0, 1)
x = gen_x()
while 1:
y = extract(z, 3)
while y != extract(z, 4):
z = compose(z, next(x))
y = extract(z, 3)
z = compose((10, -10*y, 0, 1), z)
yield y
return list(_islice(pi_digits(), n))
return calc_ndigits, (50, )
def task_regex():
"""regular expression (C)"""
# XXX this task gives horrendous latency results.
import re
# Taken from the `inspect` module
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
with open(__file__, "r") as f:
arg = f.read(2000)
def findall(s):
t = time.time()
try:
return pat.findall(s)
finally:
print(time.time() - t)
return pat.findall, (arg, )
def task_sort():
"""list sorting (C)"""
def list_sort(l):
l = l[::-1]
l.sort()
return list_sort, (list(range(1000)), )
def task_compress_zlib():
"""zlib compression (C)"""
import zlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 3
def compress(s):
zlib.decompress(zlib.compress(s, 5))
return compress, (arg, )
def task_compress_bz2():
"""bz2 compression (C)"""
import bz2
with open(__file__, "rb") as f:
arg = f.read(3000) * 2
def compress(s):
bz2.compress(s)
return compress, (arg, )
def task_hashing():
"""SHA1 hashing (C)"""
import hashlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 30
def compute(s):
hashlib.sha1(s).digest()
return compute, (arg, )
throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
throughput_tasks.append(task_hashing)
else:
throughput_tasks.append(task_compress_zlib)
latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]
class TimedLoop:
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self, start_time, min_duration, end_event, do_yield=False):
step = 20
niters = 0
duration = 0.0
_time = time.time
_sleep = time.sleep
_func = self.func
_args = self.args
t1 = start_time
while True:
for i in range(step):
_func(*_args)
t2 = _time()
# If another thread terminated, the current measurement is invalid
# => return the previous one.
if end_event:
return niters, duration
niters += step
duration = t2 - start_time
if duration >= min_duration:
end_event.append(None)
return niters, duration
if t2 - t1 < 0.01:
# Minimize interference of measurement on overall runtime
step = step * 3 // 2
elif do_yield:
# OS scheduling of Python threads is sometimes so bad that we
# have to force thread switching ourselves, otherwise we get
# completely useless results.
_sleep(0.0001)
t1 = t2
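# end_event is a shared list used as a cross-thread flag: the first loop to
# reach min_duration appends to it, and the other loops return their previous
# (still valid) measurement as soon as they observe it is non-empty.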
def run_throughput_test(func, args, nthreads):
assert nthreads >= 1
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
end_event = []
if nthreads == 1:
# Pure single-threaded performance, without any switching or
# synchronization overhead.
start_time = time.time()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=False))
return results
started = False
ready_cond = threading.Condition()
start_cond = threading.Condition()
ready = []
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=True))
threads = []
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# We don't want measurements to include thread startup overhead,
# so we arrange for timing to start after all threads are ready.
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
with start_cond:
start_time = time.time()
started = True
start_cond.notify(nthreads)
for t in threads:
t.join()
return results
def run_throughput_tests(max_threads):
for task in throughput_tasks:
print(task.__doc__)
print()
func, args = task()
nthreads = 1
baseline_speed = None
while nthreads <= max_threads:
results = run_throughput_test(func, args, nthreads)
# Taking the max duration rather than average gives pessimistic
# results rather than optimistic.
speed = sum(r[0] for r in results) / max(r[1] for r in results)
print("threads=%d: %d" % (nthreads, speed), end="")
if baseline_speed is None:
print(" iterations/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
def latency_client(addr, nb_pings, interval):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
_time = time.time
_sleep = time.sleep
def _ping():
_sendto(sock, "%r\n" % _time(), addr)
# The first ping signals the parent process that we are ready.
_ping()
# We give the parent a bit of time to notice.
_sleep(1.0)
for i in range(nb_pings):
_sleep(interval)
_ping()
_sendto(sock, LAT_END + "\n", addr)
def run_latency_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--latclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_latency_test(func, args, nthreads):
# Create a listening socket to receive the pings. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
interval = LATENCY_PING_INTERVAL
duration = LATENCY_DURATION
nb_pings = int(duration / interval)
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first ping(s) to arrive before
# unblocking the background threads.
chunks = []
process = run_latency_client(addr=sock.getsockname(),
nb_pings=nb_pings, interval=interval)
s = _recv(sock, 4096)
_time = time.time
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
while LAT_END not in s:
s = _recv(sock, 4096)
t = _time()
chunks.append((t, s))
# Tell the background threads to stop.
end_event.append(None)
for t in threads:
t.join()
process.wait()
sock.close()
for recv_time, chunk in chunks:
# NOTE: it is assumed that a line sent by a client wasn't received
# in two chunks because the lines are very small.
for line in chunk.splitlines():
line = line.strip()
if line and line != LAT_END:
send_time = eval(line)
assert isinstance(send_time, float)
results.append((send_time, recv_time))
return results
def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print()
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
local_addr = sock.getsockname()
_time = time.time
_sleep = time.sleep
def _send_chunk(msg):
_sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
# We give the parent some time to be ready.
_sleep(1.0)
try:
start_time = _time()
end_time = start_time + duration * 2.0
i = 0
while _time() < end_time:
_send_chunk(str(i))
s = _recv(sock, packet_size)
assert len(s) == packet_size
i += 1
_send_chunk(BW_END)
finally:
sock.close()
def run_bandwidth_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--bwclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_bandwidth_test(func, args, nthreads):
# Create a listening socket to receive the packets. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
duration = BANDWIDTH_DURATION
packet_size = BANDWIDTH_PACKET_SIZE
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first packet to arrive before
# unblocking the background threads.
process = run_bandwidth_client(addr=addr,
packet_size=packet_size,
duration=duration)
_time = time.time
# This will also wait for the parent to be ready
s = _recv(sock, packet_size)
remote_addr = eval(s.partition('#')[0])
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
n = 0
first_time = None
while not end_event and BW_END not in s:
_sendto(sock, s, remote_addr)
s = _recv(sock, packet_size)
if first_time is None:
first_time = _time()
n += 1
end_time = _time()
end_event.append(None)
for t in threads:
t.join()
process.kill()
return (n - 1) / (end_time - first_time)
def run_bandwidth_tests(max_threads):
for task in bandwidth_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
baseline_speed = None
while nthreads <= max_threads:
results = run_bandwidth_test(func, args, nthreads)
speed = results
#speed = len(results) * 1.0 / results[-1][0]
print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
if baseline_speed is None:
print(" packets/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
def main():
usage = "usage: %prog [-h|--help] [options]"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--throughput",
action="store_true", dest="throughput", default=False,
help="run throughput tests")
parser.add_option("-l", "--latency",
action="store_true", dest="latency", default=False,
help="run latency tests")
parser.add_option("-b", "--bandwidth",
action="store_true", dest="bandwidth", default=False,
help="run I/O bandwidth tests")
parser.add_option("-i", "--interval",
action="store", type="int", dest="check_interval", default=None,
help="sys.setcheckinterval() value")
parser.add_option("-I", "--switch-interval",
action="store", type="float", dest="switch_interval", default=None,
help="sys.setswitchinterval() value")
parser.add_option("-n", "--num-threads",
action="store", type="int", dest="nthreads", default=4,
help="max number of threads in tests")
# Hidden option to run the pinging and bandwidth clients
parser.add_option("", "--latclient",
action="store", dest="latclient", default=None,
help=SUPPRESS_HELP)
parser.add_option("", "--bwclient",
action="store", dest="bwclient", default=None,
help=SUPPRESS_HELP)
options, args = parser.parse_args()
if args:
parser.error("unexpected arguments")
if options.latclient:
kwargs = eval(options.latclient)
latency_client(**kwargs)
return
if options.bwclient:
kwargs = eval(options.bwclient)
bandwidth_client(**kwargs)
return
if not options.throughput and not options.latency and not options.bandwidth:
options.throughput = options.latency = options.bandwidth = True
if options.check_interval:
sys.setcheckinterval(options.check_interval)
if options.switch_interval:
sys.setswitchinterval(options.switch_interval)
print("== %s %s (%s) ==" % (
platform.python_implementation(),
platform.python_version(),
platform.python_build()[0],
))
# Processor identification often has repeated spaces
cpu = ' '.join(platform.processor().split())
print("== %s %s on '%s' ==" % (
platform.machine(),
platform.system(),
cpu,
))
print()
if options.throughput:
print("--- Throughput ---")
print()
run_throughput_tests(options.nthreads)
if options.latency:
print("--- Latency ---")
print()
run_latency_tests(options.nthreads)
if options.bandwidth:
print("--- I/O bandwidth ---")
print()
run_bandwidth_tests(options.nthreads)
if __name__ == "__main__":
main()
| 29.839344 | 92 | 0.566531 |
5ae5adefe24cefd6244bf8ed53ef10cd29ddb636 | 48,323 | py | Python | pylayers/util/easygui.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | pylayers/util/easygui.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | pylayers/util/easygui.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | """
EasyGuiRevisionInfo = " version 0.73 2008-02-10"
EasyGui provides an easy-to-use interface for simple GUI interaction
with a user. It does not require the programmer to know anything about
tkinter, frames, widgets, callbacks or lambda. All GUI interactions are
invoked by simple function calls that return results.
Documentation is in an accompanying file, easygui.txt.
WARNING about using EasyGui with IDLE
======================================
You may encounter problems using IDLE to run programs that use EasyGui. Try it
and find out. EasyGui is a collection of Tkinter routines that run their own
event loops. IDLE is also a Tkinter application, with its own event loop. The
two may conflict, with unpredictable results. If you find that you have
problems, try running your program outside of IDLE.
Note that EasyGui requires Tk release 8.0 or greater.
"""
#EasyGuiRevisionInfo = " version 0.72 2004-06-20"
EasyGuiRevisionInfo = " version 0.73 2008-02-10"
# see easygui.txt for revision history information
__all__ = ['ynbox'
, 'ccbox'
, 'boolbox'
, 'indexbox'
, 'msgbox'
, 'buttonbox'
, 'integerbox'
, 'multenterbox'
, 'enterbox'
, 'choicebox'
, 'codebox'
, 'textbox'
, 'diropenbox'
, 'fileopenbox'
, 'filesavebox'
, 'passwordbox'
, 'multpasswordbox'
, 'multchoicebox'
]
import sys
from tkinter import *
from numpy import *
if TkVersion < 8.0 :
print("\n" * 3)
print("*"*75)
print("Running Tk version:", TkVersion)
print("You must be using Tk version 8.0 or greater to use EasyGui.")
print("Terminating.")
print("*"*75)
print("\n" * 3)
sys.exit(0)
rootWindowPosition = "+300+200"
import string
DEFAULT_FONT_FAMILY = ("MS", "Sans", "Serif")
MONOSPACE_FONT_FAMILY = ("Courier")
DEFAULT_FONT_SIZE = 10
BIG_FONT_SIZE = 12
SMALL_FONT_SIZE = 9
CODEBOX_FONT_SIZE = 9
TEXTBOX_FONT_SIZE = DEFAULT_FONT_SIZE
import tkinter.filedialog
#-------------------------------------------------------------------
# various boxes built on top of the basic buttonbox
#-------------------------------------------------------------------
def ynbox(message="Shall I continue?", title=" "):
"""Display a message box with choices of Yes and No.
The default is "Yes".
Returns returns 1 if "Yes" is chosen, or if
the dialog is cancelled (which is interpreted as
choosing the default). Otherwise returns 0.
If invoked without a message parameter, displays a generic request for a confirmation
that the user wishes to continue. So it can be used this way:
if ynbox(): pass # continue
else: sys.exit(0) # exit the program
"""
choices = ["Yes", "No"]
if title == None: title = ""
return boolbox(message, title, choices)
def ccbox(message="Shall I continue?", title=" "):
"""Display a message box with choices of Continue and Cancel.
The default is "Continue".
Returns returns 1 if "Continue" is chosen, or if
the dialog is cancelled (which is interpreted as
choosing the default). Otherwise returns 0.
If invoked without a message parameter, displays a generic request for a confirmation
that the user wishes to continue. So it can be used this way:
if ccbox(): pass # continue
else: sys.exit(0) # exit the program
"""
choices = ["Continue", "Cancel"]
if title == None: title = ""
return boolbox(message, title, choices)
def boolbox(message="Shall I continue?", title=" ", choices=["Yes","No"]):
"""Display a boolean message box.
The default is the first choice.
    Returns 1 if the first choice is chosen, or if
the dialog is cancelled (which is interpreted as
choosing the default). Otherwise returns 0.
"""
if title == None:
if message == "Shall I continue?": title = "Confirmation"
else: title = ""
reply = buttonbox(msg=message, choices=choices, title=title)
if reply == choices[0]: return 1
else: return 0
def indexbox(message="Shall I continue?", title=" ", choices=["Yes","No"]):
"""Display a buttonbox with the specified choices.
Return the index of the choice selected.
"""
reply = buttonbox(msg=message, choices=choices, title=title)
index = -1
for choice in choices:
index = index + 1
if reply == choice: return index
#-------------------------------------------------------------------
# msgbox
#-------------------------------------------------------------------
def msgbox(message="Shall I continue?", title=" ", buttonMessage="OK"):
"""Display a messagebox
"""
choices = [buttonMessage]
reply = buttonbox(msg=message, choices=choices, title=title)
return reply
#-------------------------------------------------------------------
# getarg
#-------------------------------------------------------------------
def getarg(arg_index, kwarg_name, default, *args, **kwargs):
"""
return the value of an argument that may occur
in either the args or in the kwargs.
If it occurs in both, the kwarg over-rides the arg.
"""
arg = None
if len(args) > arg_index: arg = args[arg_index] # look in args
arg = kwargs.get(kwarg_name,arg) # look in kwargs
if arg == None: return default
return arg
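# Example: getarg(1, "title", " ", "some message", title="My Title") returns
# "My Title" -- only one positional argument was given, so the positional
# slot for "title" is empty and the keyword value is used; the keyword also
# over-rides the positional value when both are present.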
#-------------------------------------------------------------------
# buttonbox
#-------------------------------------------------------------------
def buttonbox(*args, **kwargs):
"""
Display a message, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
positional arg 0 (or kwarg "msg" ) = the message to be displayed.
positional arg 1 (or kwarg "title") = the window title
positional arg 2 (or kwarg "choices") = the choices to be displayed
"""
global root, __replyButtonText, __widgetTexts, buttonsFrame
msg = "Please select one of these options."
title = " "
choices = ["Button1", "Button2", "Button3"]
message = getarg(0, "msg" , msg , *args, **kwargs)
title = getarg(1, "title" , title , *args, **kwargs)
choices = getarg(2, "choices", choices , *args, **kwargs)
# Initialize __replyButtonText to the first choice.
# This is what will be used if the window is closed by the close button.
__replyButtonText = choices[0]
root = Tk()
root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
root.title(title)
root.iconname('Dialog')
root.geometry(rootWindowPosition)
root.minsize(400, 100)
# ------------- define the frames --------------------------------------------
messageFrame = Frame(root)
messageFrame.pack(side=TOP, fill=BOTH)
buttonsFrame = Frame(root)
buttonsFrame.pack(side=BOTTOM, fill=BOTH)
# -------------------- place the widgets in the frames -----------------------
messageWidget = Message(messageFrame, text=message, width=400)
messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m')
__put_buttons_in_buttonframe(choices)
# -------------- the action begins -----------
# put the focus on the first button
__firstWidget.focus_force()
root.mainloop()
root.destroy()
return __replyButtonText
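# Example:
#   reply = buttonbox("Pick a color.", "Demo", ["Red", "Green", "Blue"])
# displays three buttons and returns the text of the one selected, e.g. "Green".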
#-------------------------------------------------------------------
# integerbox
#-------------------------------------------------------------------
def integerbox(message="Enter something.", title=" "
, argDefault=None, argLowerBound=0, argUpperBound=99):
"""Show a box in which a user can enter an integer.
In addition to arguments for message and title, this function accepts
integer arguments for default_value, lowerbound, and upperbound.
The default_value argument may be None.
When the user enters some text, the text is checked to verify
that it can be converted to an integer between the lowerbound and upperbound.
If it can be, the integer (not the text) is returned.
If it cannot, then an error message is displayed, and the integerbox is
redisplayed.
If the user cancels the operation, the default value is returned.
"""
    if argDefault is None:
        argDefault = ""
    elif argDefault != "" and not isinstance(argDefault, int):
        raise AssertionError(
            "integerbox received a non-integer default value of "
            + str(argDefault))
    if not isinstance(argLowerBound, int):
        raise AssertionError(
            "integerbox received a non-integer lower bound of "
            + str(argLowerBound))
    if not isinstance(argUpperBound, int):
        raise AssertionError(
            "integerbox received a non-integer upper bound of "
            + str(argUpperBound))
if message == "":
message = ("Enter an integer between " + str(argLowerBound)
+ " and "
+ str(argUpperBound)
)
    while 1:
        result = enterbox(message, title, str(argDefault))
        if result is None:
            return argDefault
        try:
            myInteger = int(result)
        except ValueError:
            msgbox("The value that you entered is not an integer.", "Error")
            continue
if myInteger >= argLowerBound and myInteger <= argUpperBound:
return myInteger
else:
msgbox ("The value that you entered is not between the lower bound of "
+ str(argLowerBound)
+ " and the upper bound of "
+ str(argUpperBound)
+ "."
, "Error")
#-------------------------------------------------------------------
# multenterbox
#-------------------------------------------------------------------
def multenterbox(message="Fill in values for the fields."
, title=" "
, argListOfFieldNames = []
, argListOfFieldValues = []
):
"""
Show screen with multiple data entry fields.
The third argument is a list of fieldnames.
    The fourth argument is a list of field values.
If there are fewer values than names, the list of values is padded with
empty strings until the number of values is the same as the number of names.
If there are more values than names, the list of values
is truncated so that there are as many values as names.
Returns a list of the values of the fields,
or None if the user cancels the operation.
Here is some example code, that shows how values returned from
multenterbox can be checked for validity before they are accepted.
----------------------------------------------------------------------
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was:", fieldValues)
----------------------------------------------------------------------
"""
return __multfillablebox(
message,title,argListOfFieldNames,argListOfFieldValues,None)
def pointbox(pt,npt=1,text1='Enter point coordinates',text2=''):
"""
GUI for points
    npt is the number of points (default: npt=1)
"""
    if npt == 1:
        point = multenterbox(text1, text2,
            ('x', 'y', 'z'), (str(pt[0]), str(pt[1]), str(pt[2])))
    else:
        point = multenterbox(text1, text2,
            ('x', 'y', 'z', 'npt'), (str(pt[0]), str(pt[1]), str(pt[2]), str(npt)))
    if point is None:
        # the user cancelled: keep the original values
        return (pt, npt)
    if len(point) == 4:
        npt = int(point[3])
    # float() is used instead of eval(): it accepts plain numbers while
    # refusing to execute arbitrary expressions typed into the dialog
    px = float(point[0])
    py = float(point[1])
    pz = float(point[2])
    pt = array([px, py, pz])
    return (pt, npt)
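# Illustrative use of pointbox (assumes numpy's array() is in scope, as the
# implementation above already requires):
#   pt, npt = pointbox(array([0.0, 0.0, 0.0]))
#   pops up x/y/z entry fields pre-filled with the given coordinates and
#   returns the edited point (and the point count when npt != 1).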
def multpasswordbox(message="Fill in values for the fields."
, title=" "
, argListOfFieldNames = []
, argListOfFieldValues = []
):
"""
    Same interface as multenterbox. But in multpasswordbox,
the last of the fields is assumed to be a password, and
is masked with asterisks.
Here is some example code, that shows how values returned from
multpasswordbox can be checked for validity before they are accepted.
----------------------------------------------------------------------
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
print("Reply was:", fieldValues)
----------------------------------------------------------------------
"""
return __multfillablebox(
message,title,argListOfFieldNames,argListOfFieldValues,"*")
def __multfillablebox(message="Fill in values for the fields."
, title=" "
, argListOfFieldNames = []
, argListOfFieldValues = []
, argMaskCharacter = None
):
global root, __multenterboxText, __multenterboxDefaultText, cancelButton, entryWidget, okButton
    if title is None: title = ""
choices = ["OK", "Cancel"]
if len(argListOfFieldNames) == 0: return None
if len(argListOfFieldValues) == len(argListOfFieldNames): pass
    elif len(argListOfFieldValues) > len(argListOfFieldNames):
        # truncate the values so there are as many values as names
        argListOfFieldValues = argListOfFieldValues[0:len(argListOfFieldNames)]
else:
while len(argListOfFieldValues) < len(argListOfFieldNames):
argListOfFieldValues.append("")
root = Tk()
root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
root.title(title)
root.iconname('Dialog')
root.geometry(rootWindowPosition)
root.bind("<Escape>", __multenterboxCancel)
# -------------------- put subframes in the root --------------------
messageFrame = Frame(root)
messageFrame.pack(side=TOP, fill=BOTH)
#-------------------- the message widget ----------------------------
#messageWidget = Message(messageFrame, width="4.5", text=message)
messageWidget = Message(messageFrame, text=message)
messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')
global entryWidgets
entryWidgets = []
lastWidgetIndex = len(argListOfFieldNames) - 1
for widgetIndex in range(len(argListOfFieldNames)):
argFieldName = argListOfFieldNames[widgetIndex]
argFieldValue = argListOfFieldValues[widgetIndex]
entryFrame = Frame(root)
entryFrame.pack(side=TOP, fill=BOTH)
# --------- entryWidget ----------------------------------------------
labelWidget = Label(entryFrame, text=argFieldName)
labelWidget.pack(side=LEFT)
entryWidgets.append(Entry(entryFrame, width=40))
entryWidgets[widgetIndex].configure(font=(DEFAULT_FONT_FAMILY,BIG_FONT_SIZE))
entryWidgets[widgetIndex].pack(side=RIGHT, padx="3m")
entryWidgets[widgetIndex].bind("<Return>", __multenterboxGetText)
entryWidgets[widgetIndex].bind("<Escape>", __multenterboxCancel)
# for the last entryWidget, if this is a multpasswordbox,
# show the contents as just asterisks
if widgetIndex == lastWidgetIndex:
if argMaskCharacter:
entryWidgets[widgetIndex].configure(show=argMaskCharacter)
# put text into the entryWidget
        entryWidgets[widgetIndex].insert(0, argFieldValue)
# ------------------ ok button -------------------------------
buttonsFrame = Frame(root)
buttonsFrame.pack(side=BOTTOM, fill=BOTH)
okButton = Button(buttonsFrame, takefocus=1, text="OK")
okButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
okButton.bind("<Return>" , __multenterboxGetText)
okButton.bind("<Button-1>", __multenterboxGetText)
# ------------------ cancel button -------------------------------
cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel")
cancelButton.pack(expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
cancelButton.bind("<Return>" , __multenterboxCancel)
cancelButton.bind("<Button-1>", __multenterboxCancel)
# ------------------- time for action! -----------------
entryWidgets[0].focus_force() # put the focus on the entryWidget
root.mainloop() # run it!
# -------- after the run has completed ----------------------------------
root.destroy() # button_click didn't destroy root, so we do it now
return __multenterboxText
def __multenterboxGetText(event):
global root, __multenterboxText, entryWidget
__multenterboxText = []
global entryWidgets
for entryWidget in entryWidgets:
__multenterboxText.append(entryWidget.get())
root.quit()
def __multenterboxCancel(event):
global root, __multenterboxDefaultText, __multenterboxText
__multenterboxText = None
root.quit()
#-------------------------------------------------------------------
# enterbox
#-------------------------------------------------------------------
def enterbox(message="Enter something.", title=" ", argDefaultText=""):
"""Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
Returns the text that the user entered, or None if he cancels the operation.
"""
return __fillablebox(message, title, argDefaultText, None)
def passwordbox(message="Enter your password.", title=" ", argDefaultPassword=""):
"""Show a box in which a user can enter a password.
The text is masked with asterisks, so the password is not displayed.
Returns the text that the user entered, or None if he cancels the operation.
"""
return __fillablebox(message, title, argDefaultPassword, "*")
def __fillablebox(message, title, argDefaultText, argMaskCharacter):
"""Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
Returns the text that the user entered, or None if he cancels the operation.
"""
global root, __enterboxText, __enterboxDefaultText, cancelButton, entryWidget, okButton
    if title is None: title = ""
    if argDefaultText is None: argDefaultText = ""
__enterboxDefaultText = argDefaultText
__enterboxText = __enterboxDefaultText
choices = ["OK", "Cancel"]
root = Tk()
root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
root.title(title)
root.iconname('Dialog')
root.geometry(rootWindowPosition)
root.bind("<Escape>", __enterboxCancel)
# -------------------- put subframes in the root --------------------
messageFrame = Frame(root)
messageFrame.pack(side=TOP, fill=BOTH)
entryFrame = Frame(root)
entryFrame.pack(side=TOP, fill=BOTH)
buttonsFrame = Frame(root)
buttonsFrame.pack(side=BOTTOM, fill=BOTH)
#-------------------- the message widget ----------------------------
messageWidget = Message(messageFrame, width="4.5i", text=message)
messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')
# --------- entryWidget ----------------------------------------------
entryWidget = Entry(entryFrame, width=40)
entryWidget.configure(font=(DEFAULT_FONT_FAMILY,BIG_FONT_SIZE))
if argMaskCharacter:
entryWidget.configure(show=argMaskCharacter)
entryWidget.pack(side=LEFT, padx="3m")
entryWidget.bind("<Return>", __enterboxGetText)
entryWidget.bind("<Escape>", __enterboxCancel)
# put text into the entryWidget
entryWidget.insert(0,__enterboxDefaultText)
# ------------------ ok button -------------------------------
okButton = Button(buttonsFrame, takefocus=1, text="OK")
okButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
okButton.bind("<Return>" , __enterboxGetText)
okButton.bind("<Button-1>", __enterboxGetText)
# ------------------ (possible) restore button -------------------------------
    if argDefaultText:
        # make a button to restore the default text
restoreButton = Button(buttonsFrame, takefocus=1, text="Restore default")
restoreButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
restoreButton.bind("<Return>" , __enterboxRestore)
restoreButton.bind("<Button-1>", __enterboxRestore)
# ------------------ cancel button -------------------------------
cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel")
cancelButton.pack(expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
cancelButton.bind("<Return>" , __enterboxCancel)
cancelButton.bind("<Button-1>", __enterboxCancel)
# ------------------- time for action! -----------------
entryWidget.focus_force() # put the focus on the entryWidget
root.mainloop() # run it!
# -------- after the run has completed ----------------------------------
root.destroy() # button_click didn't destroy root, so we do it now
return __enterboxText
def __enterboxGetText(event):
global root, __enterboxText, entryWidget
__enterboxText = entryWidget.get()
root.quit()
def __enterboxRestore(event):
global root, __enterboxText, entryWidget
    entryWidget.delete(0, END)
entryWidget.insert(0, __enterboxDefaultText)
def __enterboxCancel(event):
global root, __enterboxDefaultText, __enterboxText
__enterboxText = None
root.quit()
def denyWindowManagerClose():
""" don't allow WindowManager close
"""
x = Tk()
x.withdraw()
x.bell()
x.destroy()
#-------------------------------------------------------------------
#: multchoicebox
#-------------------------------------------------------------------
def multchoicebox(*args, **kwargs):
"""Present the user with a list of choices.
allow him to select multiple items and return them in a list.
if the user doesn't choose anything from the list, return the empty list.
return None if he cancelled selection.
positional arg 0 (or kwarg "msg" ) = the message to be displayed.
positional arg 1 (or kwarg "title") = the window title
positional arg 2 (or kwarg "choices") = the choices to be displayed
"""
msg = "Pick as many items as you like."
title = " "
choices = ["program logic error - no choices specified"]
msg = getarg(0, "msg" , msg , *args, **kwargs)
title = getarg(1, "title" , title , *args, **kwargs)
choices = getarg(2, "choices", choices , *args, **kwargs)
global __choiceboxMultipleSelect
__choiceboxMultipleSelect = 1
return __choicebox(msg, title, choices)
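# Illustrative use of multchoicebox (choices are hypothetical):
#   picks = multchoicebox("Pick toppings", "Pizza", ["ham", "olives", "basil"])
#   returns e.g. ["ham", "basil"], [] if nothing was selected, None on cancel.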
#-------------------------------------------------------------------
#: choicebox
#-------------------------------------------------------------------
def choicebox(*args, **kwargs):
"""
    Present the user with a list of choices.
    Return the choice that he selects.
    Return None if he cancels the selection.
positional arg 0 (or kwarg "msg" ) = the message to be displayed.
positional arg 1 (or kwarg "title") = the window title
positional arg 2 (or kwarg "choices") = the choices to be displayed
"""
msg = "Pick something."
title = " "
choices = ["program logic error - no choices specified"]
msg = getarg(0, "msg" , msg , *args, **kwargs)
title = getarg(1, "title" , title , *args, **kwargs)
choices = getarg(2, "choices", choices , *args, **kwargs)
global __choiceboxMultipleSelect
__choiceboxMultipleSelect = 0
return __choicebox(msg, title, choices)
def __choicebox(message, title, choices):
"""
internal routine to support choicebox() and multchoicebox()
"""
    global root, __choiceboxResults, choiceboxWidget, choiceboxChoices, defaultText
# If choices is a tuple, we make it a list so we can sort it.
# If choices is already a list, we make a new list, so that when
# we sort the choices, we don't affect the list object that we
# were given.
choices = list(choices)
# make sure all choices are strings
for index in range(len(choices)):
choices[index] = str(choices[index])
choiceboxButtons = ["OK", "Cancel"]
    lines_to_show = min(len(choices), 20)
    if title is None: title = ""
# Initialize __choiceboxResults
# This is the value that will be returned if the user clicks the close icon
__choiceboxResults = None
root = Tk()
root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root_width = int((screen_width * 0.8))
root_height = int((screen_height * 0.5))
root_xpos = int((screen_width * 0.1))
root_ypos = int((screen_height * 0.05))
root.title(title)
root.iconname('Dialog')
rootWindowPosition = "+0+0"
root.geometry(rootWindowPosition)
root.expand=NO
root.minsize(root_width, root_height)
rootWindowPosition = "+" + str(root_xpos) + "+" + str(root_ypos)
root.geometry(rootWindowPosition)
# ---------------- put the frames in the window -----------------------------------------
message_and_buttonsFrame = Frame(root)
message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)
messageFrame = Frame(message_and_buttonsFrame)
messageFrame.pack(side=LEFT, fill=X, expand=YES)
buttonsFrame = Frame(message_and_buttonsFrame)
buttonsFrame.pack(side=RIGHT, expand=NO, pady=0)
choiceboxFrame = Frame(root)
choiceboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
# -------------------------- put the widgets in the frames ------------------------------
# ---------- put a message widget in the message frame-------------------
messageWidget = Message(messageFrame, anchor=NW, text=message, width=int(root_width * 0.9))
messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')
# -------- put the choiceboxWidget in the choiceboxFrame ---------------------------
choiceboxWidget = Listbox(choiceboxFrame
, height=lines_to_show
, borderwidth="1m"
, relief="flat"
, bg="white"
)
if __choiceboxMultipleSelect:
choiceboxWidget.configure(selectmode=MULTIPLE)
choiceboxWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
# add a vertical scrollbar to the frame
rightScrollbar = Scrollbar(choiceboxFrame, orient=VERTICAL, command=choiceboxWidget.yview)
choiceboxWidget.configure(yscrollcommand = rightScrollbar.set)
# add a horizontal scrollbar to the frame
bottomScrollbar = Scrollbar(choiceboxFrame, orient=HORIZONTAL, command=choiceboxWidget.xview)
choiceboxWidget.configure(xscrollcommand = bottomScrollbar.set)
# pack the Listbox and the scrollbars. Note that although we must define
# the textbox first, we must pack it last, so that the bottomScrollbar will
# be located properly.
bottomScrollbar.pack(side=BOTTOM, fill = X)
rightScrollbar.pack(side=RIGHT, fill = Y)
choiceboxWidget.pack(side=LEFT, padx="1m", pady="1m", expand=YES, fill=BOTH)
#---------------------------------------------------
# sort the choices
# eliminate duplicates
# put the choices into the choiceboxWidget
#---------------------------------------------------
    for index in range(len(choices)):
        choices[index] = str(choices[index])
    choices.sort(key=str.lower)  # case-insensitive sort
lastInserted = None
choiceboxChoices = []
for choice in choices:
if choice == lastInserted: pass
else:
choiceboxWidget.insert(END, choice)
choiceboxChoices.append(choice)
lastInserted = choice
root.bind('<Any-Key>', KeyboardListener)
# put the buttons in the buttonsFrame
if len(choices) > 0:
okButton = Button(buttonsFrame, takefocus=YES, text="OK", height=1, width=6)
okButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
okButton.bind("<Return>", __choiceboxGetChoice)
okButton.bind("<Button-1>",__choiceboxGetChoice)
# now bind the keyboard events
choiceboxWidget.bind("<Return>", __choiceboxGetChoice)
choiceboxWidget.bind("<Double-Button-1>", __choiceboxGetChoice)
else:
# now bind the keyboard events
choiceboxWidget.bind("<Return>", __choiceboxCancel)
choiceboxWidget.bind("<Double-Button-1>", __choiceboxCancel)
cancelButton = Button(buttonsFrame, takefocus=YES, text="Cancel", height=1, width=6)
cancelButton.pack(expand=NO, side=BOTTOM, padx='2m', pady='1m', ipady="1m", ipadx="2m")
cancelButton.bind("<Return>", __choiceboxCancel)
cancelButton.bind("<Button-1>", __choiceboxCancel)
# add special buttons for multiple select features
if len(choices) > 0 and __choiceboxMultipleSelect:
selectionButtonsFrame = Frame(messageFrame)
selectionButtonsFrame.pack(side=RIGHT, fill=Y, expand=NO)
selectAllButton = Button(selectionButtonsFrame, text="Select All", height=1, width=6)
selectAllButton.bind("<Button-1>",__choiceboxSelectAll)
selectAllButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
clearAllButton = Button(selectionButtonsFrame, text="Clear All", height=1, width=6)
clearAllButton.bind("<Button-1>",__choiceboxClearAll)
clearAllButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
# -------------------- bind some keyboard events ----------------------------
root.bind("<Escape>", __choiceboxCancel)
# --------------------- the action begins -----------------------------------
# put the focus on the choiceboxWidget, and the select highlight on the first item
choiceboxWidget.select_set(0)
choiceboxWidget.focus_force()
# --- run it! -----
root.mainloop()
root.destroy()
return __choiceboxResults
def __choiceboxGetChoice(event):
global root, __choiceboxResults, choiceboxWidget
if __choiceboxMultipleSelect:
__choiceboxResults = [choiceboxWidget.get(index) for index in choiceboxWidget.curselection()]
else:
choice_index = choiceboxWidget.curselection()
__choiceboxResults = choiceboxWidget.get(choice_index)
# print "Debugging> mouse-event=", event, " event.type=", event.type
# print "Debugging> choice =", choice_index, __choiceboxResults
root.quit()
def __choiceboxSelectAll(event):
global choiceboxWidget, choiceboxChoices
choiceboxWidget.selection_set(0, len(choiceboxChoices)-1)
def __choiceboxClearAll(event):
global choiceboxWidget, choiceboxChoices
choiceboxWidget.selection_clear(0, len(choiceboxChoices)-1)
def __choiceboxCancel(event):
global root, __choiceboxResults
__choiceboxResults = None
root.quit()
def KeyboardListener(event):
global choiceboxChoices, choiceboxWidget
key = event.keysym
if len(key) <= 1:
if key in string.printable:
# Find the key in the list.
# before we clear the list, remember the selected member
try:
start_n = int(choiceboxWidget.curselection()[0])
except IndexError:
start_n = -1
## clear the selection.
choiceboxWidget.selection_clear(0, 'end')
## start from previous selection +1
for n in range(start_n+1, len(choiceboxChoices)):
item = choiceboxChoices[n]
if item[0].lower() == key.lower():
choiceboxWidget.selection_set(first=n)
choiceboxWidget.see(n)
return
else:
# has not found it so loop from top
for n in range(len(choiceboxChoices)):
item = choiceboxChoices[n]
if item[0].lower() == key.lower():
choiceboxWidget.selection_set(first = n)
choiceboxWidget.see(n)
return
# nothing matched -- we'll look for the next logical choice
for n in range(len(choiceboxChoices)):
item = choiceboxChoices[n]
if item[0].lower() > key.lower():
if n > 0:
choiceboxWidget.selection_set(first = (n-1))
else:
choiceboxWidget.selection_set(first = 0)
choiceboxWidget.see(n)
return
            # still no match (nothing was greater than the key),
            # so wrap around: set the selection to the last item in the list
lastIndex = len(choiceboxChoices)-1
choiceboxWidget.selection_set(first = lastIndex)
choiceboxWidget.see(lastIndex)
return
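# Illustrative behaviour of the type-ahead logic above (hypothetical list):
# with choices ["apple", "banana", "cherry"] and "banana" selected, pressing
# "c" jumps to "cherry"; pressing "z", which is greater than every first
# letter, wraps the selection around to the last item.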
#-------------------------------------------------------------------
# codebox
#-------------------------------------------------------------------
def codebox(message="", title=" ", text=""):
"""
Display some text in a monospaced font, with no line wrapping.
This function is suitable for displaying code and text that is
formatted using spaces.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
"""
textbox(message, title, text, codebox=1 )
#-------------------------------------------------------------------
# textbox
#-------------------------------------------------------------------
def textbox(message="", title=" ", text="", codebox=0):
"""Display some text in a proportional font with line wrapping at word breaks.
This function is suitable for displaying general written text.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
"""
if message == None: message = ""
if title == None: title = ""
global root, __replyButtonText, __widgetTexts, buttonsFrame
choices = ["0K"]
__replyButtonText = choices[0]
root = Tk()
root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root_width = int((screen_width * 0.8))
root_height = int((screen_height * 0.5))
root_xpos = int((screen_width * 0.1))
root_ypos = int((screen_height * 0.05))
root.title(title)
root.iconname('Dialog')
rootWindowPosition = "+0+0"
root.geometry(rootWindowPosition)
root.expand=NO
root.minsize(root_width, root_height)
rootWindowPosition = "+" + str(root_xpos) + "+" + str(root_ypos)
root.geometry(rootWindowPosition)
mainframe = Frame(root)
mainframe.pack(side=TOP, fill=BOTH, expand=YES)
# ---- put frames in the window -----------------------------------
# we pack the textboxFrame first, so it will expand first
textboxFrame = Frame(mainframe, borderwidth=3)
textboxFrame.pack(side=BOTTOM , fill=BOTH, expand=YES)
message_and_buttonsFrame = Frame(mainframe)
message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)
messageFrame = Frame(message_and_buttonsFrame)
messageFrame.pack(side=LEFT, fill=X, expand=YES)
buttonsFrame = Frame(message_and_buttonsFrame)
buttonsFrame.pack(side=RIGHT, expand=NO)
# -------------------- put widgets in the frames --------------------
# put a textbox in the top frame
if codebox:
character_width = int((root_width * 0.6) / CODEBOX_FONT_SIZE)
textbox = Text(textboxFrame,height=25,width=character_width, padx="2m", pady="1m")
textbox.configure(wrap=NONE)
textbox.configure(font=(MONOSPACE_FONT_FAMILY, CODEBOX_FONT_SIZE))
else:
character_width = int((root_width * 0.6) / SMALL_FONT_SIZE)
textbox = Text(
textboxFrame
, height=25
, width=character_width
, padx="2m"
, pady="1m"
)
textbox.configure(wrap=WORD)
textbox.configure(font=(DEFAULT_FONT_FAMILY,TEXTBOX_FONT_SIZE))
# some simple keybindings for scrolling
mainframe.bind("<Next>" , textbox.yview_scroll( 1,PAGES))
mainframe.bind("<Prior>", textbox.yview_scroll(-1,PAGES))
mainframe.bind("<Right>", textbox.xview_scroll( 1,PAGES))
mainframe.bind("<Left>" , textbox.xview_scroll(-1,PAGES))
mainframe.bind("<Down>", textbox.yview_scroll( 1,UNITS))
mainframe.bind("<Up>" , textbox.yview_scroll(-1,UNITS))
# add a vertical scrollbar to the frame
rightScrollbar = Scrollbar(textboxFrame, orient=VERTICAL, command=textbox.yview)
textbox.configure(yscrollcommand = rightScrollbar.set)
# add a horizontal scrollbar to the frame
bottomScrollbar = Scrollbar(textboxFrame, orient=HORIZONTAL, command=textbox.xview)
textbox.configure(xscrollcommand = bottomScrollbar.set)
# pack the textbox and the scrollbars. Note that although we must define
# the textbox first, we must pack it last, so that the bottomScrollbar will
# be located properly.
# Note that we need a bottom scrollbar only for code.
# Text will be displayed with wordwrap, so we don't need to have a horizontal
# scroll for it.
if codebox:
bottomScrollbar.pack(side=BOTTOM, fill=X)
rightScrollbar.pack(side=RIGHT, fill=Y)
textbox.pack(side=LEFT, fill=BOTH, expand=YES)
# ---------- put a message widget in the message frame-------------------
messageWidget = Message(messageFrame, anchor=NW, text=message, width=int(root_width * 0.9))
messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE))
messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')
# put the buttons in the buttonsFrame
okButton = Button(buttonsFrame, takefocus=YES, text="OK", height=1, width=6)
okButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
okButton.bind("<Return>" , __textboxOK)
okButton.bind("<Button-1>", __textboxOK)
okButton.bind("<Escape>" , __textboxOK)
# ----------------- the action begins ----------------------------------------
try:
# load the text into the textbox
if type(text) == type("abc"): pass
else:
try:
text = "".join(text) # convert a list or a tuple to a string
except:
msgbox("Exception when trying to convert "+ str(type(text)) + " to text in textbox")
sys.exit(16)
textbox.insert(END,text, "normal")
# disable the textbox, so the text cannot be edited
textbox.configure(state=DISABLED)
    except Exception:
msgbox("Exception when trying to load the textbox.")
sys.exit(16)
try:
okButton.focus_force()
    except Exception:
msgbox("Exception when trying to put focus on okButton.")
sys.exit(16)
root.mainloop()
root.destroy()
return __replyButtonText
def __textboxOK(event):
global root
root.quit()
#-------------------------------------------------------------------
# diropenbox
#-------------------------------------------------------------------
def diropenbox(msg=None, title=None, argInitialDir=None):
"""A dialog to get a directory name.
Note that the msg argument, if specified, is ignored.
Returns the name of a directory, or None if user chose to cancel.
If an initial directory is specified in argument 3,
and that directory exists, then the
dialog box will start with that directory.
"""
root = Tk()
root.withdraw()
    if argInitialDir is None:
f = tkFileDialog.askdirectory(parent=root, title=title)
else:
f = tkFileDialog.askdirectory(parent=root, title=title, initialdir=argInitialDir)
if f == "": return None
return f
#-------------------------------------------------------------------
# fileopenbox
#-------------------------------------------------------------------
def fileopenbox(msg=None, title=None, argInitialFile=None):
"""A dialog to get a file name.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears.
"""
root = Tk()
root.withdraw()
f = tkFileDialog.askopenfilename(parent=root,title=title, initialfile=argInitialFile)
if f == "": return None
return f
#-------------------------------------------------------------------
# filesavebox
#-------------------------------------------------------------------
def filesavebox(msg=None, title=None, argInitialFile=None):
"""A file to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears.
"""
root = Tk()
root.withdraw()
f = tkFileDialog.asksaveasfilename(parent=root, title=title, initialfile=argInitialFile)
if f == "": return None
return f
#-------------------------------------------------------------------
# utility routines
#-------------------------------------------------------------------
# These routines are used by several other functions in the EasyGui module.
def __buttonEvent(event):
"""Handle an event that is generated by a person clicking a button.
"""
global root, __widgetTexts, __replyButtonText
__replyButtonText = __widgetTexts[event.widget]
root.quit() # quit the main loop
def __put_buttons_in_buttonframe(choices):
"""Put the buttons in the buttons frame
"""
global __widgetTexts, __firstWidget, buttonsFrame
__widgetTexts = {}
i = 0
for buttonText in choices:
tempButton = Button(buttonsFrame, takefocus=1, text=buttonText)
tempButton.pack(expand=YES, side=LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
# remember the text associated with this widget
__widgetTexts[tempButton] = buttonText
# remember the first widget, so we can put the focus there
if i == 0:
__firstWidget = tempButton
i = 1
# bind the keyboard events to the widget
tempButton.bind("<Return>", __buttonEvent)
tempButton.bind("<Button-1>", __buttonEvent)
#-------------------------------------------------------------------
# test driver code
#-------------------------------------------------------------------
def _test():
# simple way to clear the console
print("\n" * 100)
# START DEMONSTRATION DATA ===================================================
choices_abc = ["This is choice 1", "And this is choice 2"]
message = "Pick one! This is a huge choice, and you've got to make the right one " \
"or you will surely mess up the rest of your life, and the lives of your " \
"friends and neighbors!"
title = ""
# ============================= define a code snippet =========================
code_snippet = ("dafsdfa dasflkj pp[oadsij asdfp;ij asdfpjkop asdfpok asdfpok asdfpok"*3) +"\n"+\
"""# here is some dummy Python code
for someItem in myListOfStuff:
do something(someItem)
do something()
do something()
if somethingElse(someItem):
doSomethingEvenMoreInteresting()
"""*16
#======================== end of code snippet ==============================
#================================= some text ===========================
text_snippet = ((\
"""It was the best of times, and it was the worst of times. The rich ate cake, and the poor had cake recommended to them, but wished only for enough cash to buy bread. The time was ripe for revolution! """ \
*5)+"\n\n")*10
#===========================end of text ================================
intro_message = ("Pick the kind of box that you wish to demo.\n\n"
+ "In EasyGui, all GUI interactions are invoked by simple function calls.\n\n" +
"EasyGui is different from other GUIs in that it is NOT event-driven. It allows" +
" you to program in a traditional linear fashion, and to put up dialogs for simple" +
" input and output when you need to. If you are new to the event-driven paradigm" +
" for GUIs, EasyGui will allow you to be productive with very basic tasks" +
" immediately. Later, if you wish to make the transition to an event-driven GUI" +
" paradigm, you can move to an event-driven style with a more powerful GUI package" +
"such as anygui, PythonCard, Tkinter, wxPython, etc."
+ "\n\nEasyGui is running Tk version: " + str(TkVersion)
)
#========================================== END DEMONSTRATION DATA
while 1: # do forever
choices = [
"msgbox",
"buttonbox",
"choicebox",
"multchoicebox",
"textbox",
"ynbox",
"ccbox",
"enterbox",
"codebox",
"integerbox",
"boolbox",
"indexbox",
"filesavebox",
"fileopenbox",
"passwordbox",
"multenterbox",
"multpasswordbox",
"diropenbox"
]
choice = choicebox(msg=intro_message
, title="EasyGui " + EasyGuiRevisionInfo
, choices=choices)
        if choice is None: return
reply = choice.split()
if reply[0] == "msgbox":
reply = msgbox("short message", "This is a long title")
print("Reply was:", reply)
elif reply[0] == "buttonbox":
reply = buttonbox()
print("Reply was:", reply)
reply = buttonbox(msg=message
, title="Demo of Buttonbox with many, many buttons!"
, choices=choices)
print("Reply was:", reply)
elif reply[0] == "boolbox":
reply = boolbox()
print("Reply was:", reply)
elif reply[0] == "integerbox":
reply = integerbox(
"Enter a number between 3 and 333",
"Demo: integerbox WITH a default value",
222, 3, 333)
print("Reply was:", reply)
reply = integerbox(
"Enter a number between 0 and 99",
"Demo: integerbox WITHOUT a default value"
)
print("Reply was:", reply)
elif reply[0] == "diropenbox":
title = "Demo of diropenbox"
msg = "This is a test of the diropenbox.\n\nPick the directory that you wish to open."
d = diropenbox(msg, title)
print("You chose directory...:", d)
elif reply[0] == "fileopenbox":
f = fileopenbox()
print("You chose to open file:", f)
elif reply[0] == "filesavebox":
f = filesavebox()
print("You chose to save file:", f)
elif reply[0] == "indexbox":
title = reply[0]
msg = "Demo of " + reply[0]
choices = ["Choice1", "Choice2", "Choice3", "Choice4"]
reply = indexbox(msg, title, choices)
print("Reply was:", reply)
elif reply[0] == "passwordbox":
reply = passwordbox("Demo of password box WITHOUT default"
+ "\n\nEnter your secret password", "Member Logon")
print("Reply was:", str(reply))
reply = passwordbox("Demo of password box WITH default"
+ "\n\nEnter your secret password", "Member Logon", "alfie")
print("Reply was:", str(reply))
elif reply[0] == "enterbox":
reply = enterbox("Enter the name of your best friend:", "Love!", "Suzy Smith")
print("Reply was:", str(reply))
reply = enterbox("Enter the name of your worst enemy:", "Hate!")
print("Reply was:", str(reply))
elif reply[0] == "multenterbox":
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was:", fieldValues)
elif reply[0] == "multpasswordbox":
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
print("Reply was:", fieldValues)
elif reply[0] == "ynbox":
reply = ynbox(message, title)
print("Reply was:", reply)
elif reply[0] == "ccbox":
reply = ccbox(message)
print("Reply was:", reply)
elif reply[0] == "choicebox":
longchoice = "This is an example of a very long option which you may or may not wish to choose."*2
listChoices = ["nnn", "ddd", "eee", "fff", "aaa", longchoice
, "aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk", "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq", "rrr", "sss", "ttt", "uuu", "vvv"]
message = "Pick something. " + ("A wrapable sentence of text ?! "*30) + "\nA separate line of text."*6
reply = choicebox(msg=message, choices=listChoices)
print("Reply was:", reply)
message = "Pick something. "
reply = choicebox(msg=message, choices=listChoices)
print("Reply was:", reply)
message = "Pick something. "
reply = choicebox(msg="The list of choices is empty!", choices=[])
print("Reply was:", reply)
elif reply[0] == "multchoicebox":
listChoices = ["aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk"
, "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq"
, "rrr", "sss", "ttt", "uuu", "vvv"]
message = "Pick as many choices as you wish."
reply = multchoicebox(message,"DEMO OF multchoicebox", listChoices)
print("Reply was:", reply)
elif reply[0] == "textbox":
message = "Here is some sample text. " * 16
reply = textbox(message, "Text Sample", text_snippet)
print("Reply was:", reply)
elif reply[0] == "codebox":
message = "Here is some sample code. " * 16
reply = codebox(message, "Code Sample", code_snippet)
print("Reply was:", reply)
else:
msgbox("Choice\n\n" + choice + "\n\nis not recognized", "Program Logic Error")
return
if __name__ == '__main__':
_test()
| 33.580959 | 209 | 0.652112 |
2585bb11557ba0225ddcd3a77a31a15631114318 | 434 | py | Python | api/wsgi/src/manage.py | easyCZ/SLIP-A-2015 | ad386df9c438d93ec89f68c63d3deda12d27d1ed | [
"MIT"
] | 2 | 2020-06-08T19:17:10.000Z | 2020-06-08T20:45:07.000Z | api/wsgi/src/manage.py | easyCZ/SLIP-A-2015 | ad386df9c438d93ec89f68c63d3deda12d27d1ed | [
"MIT"
] | 14 | 2015-10-07T09:31:50.000Z | 2022-02-23T07:34:10.000Z | api/wsgi/src/manage.py | easyCZ/SLIP-A-2015 | ad386df9c438d93ec89f68c63d3deda12d27d1ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Use local config if not on the OpenShift server
if os.environ.get('SLIP_ENV') == 'local':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.local")
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
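# Illustrative usage (the command below is hypothetical): pick the local
# settings module by exporting SLIP_ENV before running a management command:
#   SLIP_ENV=local python manage.py runserver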
| 27.125 | 75 | 0.721198 |
d0c9e4369cc8547d9e37b25b4cf3c48f297c2c53 | 9,062 | py | Python | themes/qutebrowser/themes/minimal/base16-silk-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 95 | 2018-05-28T18:06:48.000Z | 2022-03-14T21:36:05.000Z | themes/qutebrowser/themes/minimal/base16-silk-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 18 | 2018-08-26T00:57:20.000Z | 2022-02-19T08:29:29.000Z | themes/qutebrowser/themes/minimal/base16-silk-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 20 | 2018-06-21T12:41:47.000Z | 2022-03-04T22:06:20.000Z | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova and Daniel Mulford
# Silk Light scheme by Gabriel Fontes (https://github.com/Misterio77)
base00 = "#E9F1EF"
base01 = "#CCD4D3"
base02 = "#90B7B6"
base03 = "#5C787B"
base04 = "#4B5B5F"
base05 = "#385156"
base06 = "#0e3c46"
base07 = "#D2FAFF"
base08 = "#CF432E"
base09 = "#D27F46"
base0A = "#CFAD25"
base0B = "#6CA38C"
base0C = "#329CA2"
base0D = "#39AAC9"
base0E = "#6E6582"
base0F = "#865369"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base00
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0D
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base05
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base09
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use an `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color for the selected item in filename prompts.
c.colors.prompts.selected.fg = base05
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base05
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base0C
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base00
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base0A
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base00
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base0E
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base00
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base04
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base01
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base0E
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base01
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base0D
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base00
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base0D
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base00
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base09
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0B
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base00
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0B
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base00
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base00
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
c.colors.webpage.bg = base00
| 30.106312 | 95 | 0.771905 |
4af524bbbc9ef3816ef8a49aeb24323fb6d00265 | 15,386 | py | Python | homeassistant/helpers/collection.py | Madj42/core | 829131fe51bd09ff9032766c57bb293c89db7614 | [
"Apache-2.0"
] | null | null | null | homeassistant/helpers/collection.py | Madj42/core | 829131fe51bd09ff9032766c57bb293c89db7614 | [
"Apache-2.0"
] | null | null | null | homeassistant/helpers/collection.py | Madj42/core | 829131fe51bd09ff9032766c57bb293c89db7614 | [
"Apache-2.0"
] | null | null | null | """Helper to deal with YAML + storage."""
from abc import ABC, abstractmethod
import asyncio
from dataclasses import dataclass
import logging
from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, cast
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
STORAGE_VERSION = 1
SAVE_DELAY = 10
CHANGE_ADDED = "added"
CHANGE_UPDATED = "updated"
CHANGE_REMOVED = "removed"
@dataclass
class CollectionChangeSet:
"""Class to represent a change set.
change_type: One of CHANGE_*
item_id: The id of the item
item: The item
"""
change_type: str
item_id: str
item: Any
ChangeListener = Callable[
[
# Change type
str,
# Item ID
str,
# New or removed config
dict,
],
Awaitable[None],
]
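# Illustrative listener matching the ChangeListener signature above; the
# logging behaviour is an assumption, not part of this module:
#
#     async def log_change(change_type: str, item_id: str, config: dict) -> None:
#         logging.getLogger(__name__).debug(
#             "collection %s: %s -> %s", change_type, item_id, config
#         )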
class CollectionError(HomeAssistantError):
"""Base class for collection related errors."""
class ItemNotFound(CollectionError):
"""Raised when an item is not found."""
def __init__(self, item_id: str):
"""Initialize item not found error."""
super().__init__(f"Item {item_id} not found.")
self.item_id = item_id
class IDManager:
"""Keep track of IDs across different collections."""
def __init__(self) -> None:
"""Initiate the ID manager."""
self.collections: List[Dict[str, Any]] = []
def add_collection(self, collection: Dict[str, Any]) -> None:
"""Add a collection to check for ID usage."""
self.collections.append(collection)
def has_id(self, item_id: str) -> bool:
"""Test if the ID exists."""
return any(item_id in collection for collection in self.collections)
def generate_id(self, suggestion: str) -> str:
"""Generate an ID."""
base = slugify(suggestion)
proposal = base
attempt = 1
while self.has_id(proposal):
attempt += 1
proposal = f"{base}_{attempt}"
return proposal
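# Illustrative behaviour of IDManager.generate_id (hypothetical values): if
# "my_light" is already taken, generate_id("My Light") slugifies the
# suggestion to "my_light", then probes "my_light_2", "my_light_3", ...
# until an unused ID is found.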
class ObservableCollection(ABC):
"""Base collection type that can be observed."""
def __init__(self, logger: logging.Logger, id_manager: Optional[IDManager] = None):
"""Initialize the base collection."""
self.logger = logger
self.id_manager = id_manager or IDManager()
self.data: Dict[str, dict] = {}
self.listeners: List[ChangeListener] = []
self.id_manager.add_collection(self.data)
@callback
def async_items(self) -> List[dict]:
"""Return list of items in collection."""
return list(self.data.values())
@callback
def async_add_listener(self, listener: ChangeListener) -> None:
"""Add a listener.
Will be called with (change_type, item_id, updated_config).
"""
self.listeners.append(listener)
async def notify_changes(self, change_sets: Iterable[CollectionChangeSet]) -> None:
"""Notify listeners of a change."""
await asyncio.gather(
*[
listener(change_set.change_type, change_set.item_id, change_set.item)
for listener in self.listeners
for change_set in change_sets
]
)
class YamlCollection(ObservableCollection):
"""Offer a collection based on static data."""
async def async_load(self, data: List[dict]) -> None:
"""Load the YAML collection. Overrides existing data."""
old_ids = set(self.data)
change_sets = []
for item in data:
item_id = item[CONF_ID]
if item_id in old_ids:
old_ids.remove(item_id)
event = CHANGE_UPDATED
elif self.id_manager.has_id(item_id):
self.logger.warning("Duplicate ID '%s' detected, skipping", item_id)
continue
else:
event = CHANGE_ADDED
self.data[item_id] = item
change_sets.append(CollectionChangeSet(event, item_id, item))
for item_id in old_ids:
change_sets.append(
CollectionChangeSet(CHANGE_REMOVED, item_id, self.data.pop(item_id))
)
if change_sets:
await self.notify_changes(change_sets)
class StorageCollection(ObservableCollection):
"""Offer a CRUD interface on top of JSON storage."""
def __init__(
self,
store: Store,
logger: logging.Logger,
id_manager: Optional[IDManager] = None,
):
"""Initialize the storage collection."""
super().__init__(logger, id_manager)
self.store = store
@property
def hass(self) -> HomeAssistant:
"""Home Assistant object."""
return self.store.hass
async def _async_load_data(self) -> Optional[dict]:
"""Load the data."""
return cast(Optional[dict], await self.store.async_load())
async def async_load(self) -> None:
"""Load the storage Manager."""
raw_storage = await self._async_load_data()
if raw_storage is None:
raw_storage = {"items": []}
for item in raw_storage["items"]:
self.data[item[CONF_ID]] = item
await self.notify_changes(
[
CollectionChangeSet(CHANGE_ADDED, item[CONF_ID], item)
for item in raw_storage["items"]
]
)
@abstractmethod
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
@callback
@abstractmethod
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
@abstractmethod
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
async def async_create_item(self, data: dict) -> dict:
"""Create a new item."""
item = await self._process_create_data(data)
item[CONF_ID] = self.id_manager.generate_id(self._get_suggested_id(item))
self.data[item[CONF_ID]] = item
self._async_schedule_save()
await self.notify_changes(
[CollectionChangeSet(CHANGE_ADDED, item[CONF_ID], item)]
)
return item
async def async_update_item(self, item_id: str, updates: dict) -> dict:
"""Update item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
if CONF_ID in updates:
raise ValueError("Cannot update ID")
current = self.data[item_id]
updated = await self._update_data(current, updates)
self.data[item_id] = updated
self._async_schedule_save()
await self.notify_changes(
[CollectionChangeSet(CHANGE_UPDATED, item_id, updated)]
)
return self.data[item_id]
async def async_delete_item(self, item_id: str) -> None:
"""Delete item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
item = self.data.pop(item_id)
self._async_schedule_save()
await self.notify_changes([CollectionChangeSet(CHANGE_REMOVED, item_id, item)])
@callback
def _async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self.store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict:
"""Return data of area registry to store in a file."""
return {"items": list(self.data.values())}
class IDLessCollection(ObservableCollection):
"""A collection without IDs."""
counter = 0
async def async_load(self, data: List[dict]) -> None:
"""Load the collection. Overrides existing data."""
await self.notify_changes(
[
CollectionChangeSet(CHANGE_REMOVED, item_id, item)
for item_id, item in list(self.data.items())
]
)
self.data.clear()
for item in data:
self.counter += 1
item_id = f"fakeid-{self.counter}"
self.data[item_id] = item
await self.notify_changes(
[
CollectionChangeSet(CHANGE_ADDED, item_id, item)
for item_id, item in self.data.items()
]
)
@callback
def sync_entity_lifecycle(
hass: HomeAssistantType,
domain: str,
platform: str,
entity_component: EntityComponent,
collection: ObservableCollection,
create_entity: Callable[[dict], Entity],
) -> None:
"""Map a collection to an entity component."""
entities = {}
async def _collection_changed(change_type: str, item_id: str, config: dict) -> None:
"""Handle a collection change."""
if change_type == CHANGE_ADDED:
entity = create_entity(config)
await entity_component.async_add_entities([entity])
entities[item_id] = entity
return
if change_type == CHANGE_REMOVED:
ent_reg = await entity_registry.async_get_registry(hass)
ent_to_remove = ent_reg.async_get_entity_id(domain, platform, item_id)
if ent_to_remove is not None:
ent_reg.async_remove(ent_to_remove)
else:
await entities[item_id].async_remove(force_remove=True)
entities.pop(item_id)
return
# CHANGE_UPDATED
await entities[item_id].async_update_config(config) # type: ignore
collection.async_add_listener(_collection_changed)
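# Editor's sketch (hypothetical wiring; names are illustrative): a component
# typically pairs a storage collection with an entity platform like so:
#
#     collection = ExampleStorageCollection(
#         Store(hass, 1, "example_items"), logging.getLogger(__name__)
#     )
#     sync_entity_lifecycle(
#         hass, "example", "example", entity_component, collection, ExampleEntity
#     )
#     await collection.async_load()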
class StorageCollectionWebsocket:
"""Class to expose storage collection management over websocket."""
def __init__(
self,
storage_collection: StorageCollection,
api_prefix: str,
model_name: str,
create_schema: dict,
update_schema: dict,
):
"""Initialize a websocket CRUD."""
self.storage_collection = storage_collection
self.api_prefix = api_prefix
self.model_name = model_name
self.create_schema = create_schema
self.update_schema = update_schema
assert self.api_prefix[-1] != "/", "API prefix should not end in /"
@property
def item_id_key(self) -> str:
"""Return item ID key."""
return f"{self.model_name}_id"
@callback
def async_setup(
self,
hass: HomeAssistant,
*,
create_list: bool = True,
create_create: bool = True,
) -> None:
"""Set up the websocket commands."""
if create_list:
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/list",
self.ws_list_item,
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): f"{self.api_prefix}/list"}
),
)
if create_create:
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/create",
websocket_api.require_admin(
websocket_api.async_response(self.ws_create_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.create_schema,
vol.Required("type"): f"{self.api_prefix}/create",
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/update",
websocket_api.require_admin(
websocket_api.async_response(self.ws_update_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.update_schema,
vol.Required("type"): f"{self.api_prefix}/update",
vol.Required(self.item_id_key): str,
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/delete",
websocket_api.require_admin(
websocket_api.async_response(self.ws_delete_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): f"{self.api_prefix}/delete",
vol.Required(self.item_id_key): str,
}
),
)
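    # Editor's note: the commands registered above accept websocket messages of
    # the following shapes (derived from the schemas; field names assumed):
    #     {"id": 1, "type": f"{api_prefix}/list"}
    #     {"id": 2, "type": f"{api_prefix}/create", **create_schema_fields}
    #     {"id": 3, "type": f"{api_prefix}/update", f"{model_name}_id": "x", **update_schema_fields}
    #     {"id": 4, "type": f"{api_prefix}/delete", f"{model_name}_id": "x"}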
def ws_list_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""List items."""
connection.send_result(msg["id"], self.storage_collection.async_items())
async def ws_create_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Create a item."""
try:
data = dict(msg)
data.pop("id")
data.pop("type")
item = await self.storage_collection.async_create_item(data)
connection.send_result(msg["id"], item)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg["id"], websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_update_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Update a item."""
data = dict(msg)
msg_id = data.pop("id")
item_id = data.pop(self.item_id_key)
data.pop("type")
try:
item = await self.storage_collection.async_update_item(item_id, data)
connection.send_result(msg_id, item)
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {item_id}",
)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg_id, websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_delete_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Delete a item."""
try:
await self.storage_collection.async_delete_item(msg[self.item_id_key])
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {msg[self.item_id_key]}",
            )
            return
connection.send_result(msg["id"])
| 30.957746 | 88 | 0.599181 |
e6f44023f63a2610bb13f526aafbd38799815dc5 | 1,609 | py | Python | artic/align_trim_fasta.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | artic/align_trim_fasta.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | artic/align_trim_fasta.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Written by Nick Loman
# Part of the ZiBRA pipeline (zibraproject.org)
import argparse
import pysam
import sys
from .align_trim import find_primer
from .vcftagprimersites import read_bed_file
def find_query_pos(alignment, reference_pos):
nearest = -1
for qry, ref in alignment.get_aligned_pairs():
        if qry is None or ref is None:
            continue
nearest = qry
if ref >= reference_pos:
return nearest
return nearest
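# Editor's note (hypothetical illustration): for aligned pairs
# [(0, 100), (1, 101), (2, 103)] and reference_pos = 102, the loop returns 2:
# the query index of the first aligned base at or beyond reference 102.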
def go(args):
bed = read_bed_file(args.bedfile)
infile = pysam.AlignmentFile(args.alignment, "rb")
for s in infile:
        # print(s.get_aligned_pairs())
        # print(">%s\n%s" % (s.query_name, s.query_alignment_sequence))
p1 = find_primer(bed, s.reference_start, '+')
p2 = find_primer(bed, s.reference_end, '-')
        # 'start' is the 5' end of each primer
        primer_start = p1[2]['start']
        primer_end = p2[2]['start']
query_align_start = find_query_pos(s, primer_start)
query_align_end = find_query_pos(s, primer_end)
print("%s\t%s\t%s\t%s" % (primer_start, primer_end, primer_end - primer_start, s.query_length), sys.stderr)
startpos = max(0, query_align_start - 40)
endpos = min(query_align_end+40, s.query_length)
print(">%s\n%s" % (s.query_name, s.query_sequence[startpos:endpos]))
#query_align_end + 30])
parser = argparse.ArgumentParser(description='Trim alignments from an amplicon scheme.')
parser.add_argument('alignment', help='BAM alignment')
parser.add_argument('bedfile', help='BED file')
args = parser.parse_args()
go(args)
| 29.254545 | 115 | 0.670603 |
946ce1167b6a2b7312287cb122b627f18ae90055 | 230 | py | Python | resources/lib/teams/texans.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 3 | 2017-09-29T13:21:59.000Z | 2020-06-01T03:49:52.000Z | resources/lib/teams/texans.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 8 | 2015-08-17T19:28:54.000Z | 2018-07-28T16:00:41.000Z | resources/lib/teams/texans.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 4 | 2017-09-15T08:36:47.000Z | 2019-03-02T20:50:56.000Z | from resources.lib.nfl2018 import NFL2018
class Team(NFL2018):
short = "texans"
hostname = "www.houstontexans.com"
def __init__(self, parameters):
self.parameters = parameters
NFL2018.__init__(self)
| 20.909091 | 41 | 0.686957 |
7e9c4ae4a64aa7c252083c57ecb072e152494e36 | 8,472 | py | Python | src/AeroMod.py | sailingfree/Python-VPP | c4730494ae86dc78260ccd94fe05c85141760360 | [
"MIT"
] | 11 | 2020-06-03T15:06:03.000Z | 2022-02-05T19:01:33.000Z | src/AeroMod.py | sailingfree/Python-VPP | c4730494ae86dc78260ccd94fe05c85141760360 | [
"MIT"
] | 2 | 2020-06-03T15:24:51.000Z | 2020-10-24T21:21:28.000Z | src/AeroMod.py | sailingfree/Python-VPP | c4730494ae86dc78260ccd94fe05c85141760360 | [
"MIT"
] | 8 | 2020-06-07T18:28:34.000Z | 2021-08-20T16:55:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Marin Lauber"
__copyright__ = "Copyright 2020, Marin Lauber"
__license__ = "GPL"
__version__ = "1.0.1"
__email__ = "M.Lauber@soton.ac.uk"
import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.optimize import root
import matplotlib.pyplot as plt
from src.UtilsMod import build_interp_func
class AeroMod(object):
def __init__(self, Yacht, rho=1.225, mu=0.0000181):
"""
Initializes an Aero Model, given a set of sails
"""
# physical params
self.rho = rho
self.mu = mu
self.flat = 1.0
self.reef = 1.0
self.ftj = 1.0
self.rfm = 1.0
# set sails and measure what is need once
self.yacht = Yacht
self.sails = self.yacht.sails[:2]
# are we upwind?
self.up = self.sails[1].up
self._measure_sails()
self._measure_windage()
# coeffs interp function
self.fcdmult = build_interp_func("fcdmult")
self.kheff = build_interp_func("kheff")
def _measure_windage(self):
self.boa = self.yacht.boa
self.loa = self.yacht.loa
self.fbav = 0.625 * self.yacht.ff + 0.375 * self.yacht.fa
def _measure_sails(self):
        self.fractionality = 1.0
        b2 = 0.0
for sail in self.sails:
sail.measure(self.rfm, self.ftj)
if sail.type == "main":
self.fractionality /= sail.P + sail.BAD
b1 = sail.P_r + sail.BAD
self.roach = sail.roach
                tf = (0.16 * (sail.CE - 0.024) / sail.P + 0.94) * sail.P + sail.BAD
if sail.type == "jib":
self.fractionality *= sail.IG_r
b2 = sail.I*sail.IG_r/sail.IG
self.overlap = sail.LPG_r / sail.J
self.HBI = sail.HBI
self.eff_span_corr = (
1.1
+ 0.08 * (self.roach - 0.2)
+ 0.5 * (0.68 + 0.31 * self.fractionality + 0.0075 * self.overlap - 1.1)
)
self.b = max(b1, b2)
        # assumes no mizzen mast
        self.heff_height_max_spi = max(tf + self.HBI, 0)
# prototype top function in hydro mod
def update(self, vb, phi, tws, twa, flat, RED):
"""
Update the aero model for current iter
"""
self.vb = max(0, vb)
self.phi = max(0, phi)
self.tws = tws
self.twa = twa
        # gradual flattening of the sails as tws increases; flat bottoms out at 0.62 above 8.5 m/s (~17 knots)
        self.flat = np.where(
            tws < 2.5,
            1.0,
            np.where(tws < 8.5, 0.81 + 0.19 * np.cos((tws - 2.5) / 6 * np.pi), 0.62),
        )
        self.ftj = max(RED - 1.0, 0.0)
        self.rfm = min(RED, 1.0)
self._measure_sails()
self._update_windTriangle()
self._area()
self._compute_forces()
return self.Fx, self.Fy, self.Mx
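    # Editor's sketch (hypothetical call): one aero evaluation per VPP iteration,
    # with boat speed in m/s and angles in degrees.
    #
    #     aero = AeroMod(yacht)
    #     Fx, Fy, Mx = aero.update(vb=3.0, phi=15.0, tws=5.0, twa=60.0,
    #                              flat=1.0, RED=1.0)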
def _compute_forces(self):
"""
Computes forces for equilibrium.
"""
# get new coeffs
self._get_coeffs()
# instead of writing many time
awa = self.awa / 180.0 * np.pi
# lift and drag
self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl
self.drag = 0.5 * self.rho * self.aws ** 2 * self.area * self.cd + self._get_Rw(awa)
# project into yacht coordinate system
self.Fx = self.lift * np.sin(awa) - self.drag * np.cos(awa)
self.Fy = self.lift * np.cos(awa) + self.drag * np.sin(awa)
# heeling moment
self.Mx = self.Fy * self._vce() * np.cos(self.phi / 180.0 * np.pi)
# side-force is horizontal component of Fh
self.Fy *= np.cos(np.deg2rad(self.phi))
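        # Editor's check (hypothetical limit case): at awa = 90 deg the rotation
        # above sends all of the lift into drive force Fx and all of the drag
        # into side force Fy (before the heel projection applied just above).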
def _get_Rw(self, awa):
Rw = 0.5 * self.rho * self.aws ** 2 * self._get_Aref(awa) * 0.816
return Rw * np.cos(awa / 180.0 * np.pi)
def _get_Aref(self, awa):
# only hull part
d = 0.5 * (1 - np.cos(awa / 90.0 * np.pi))
return self.fbav * ((1 - d) * self.boa + d * self.loa)
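        # Editor's check: d interpolates the windage reference area smoothly
        # with apparent wind angle -- at awa = 0 this returns fbav * boa
        # (head-on), at awa = 90 it returns fbav * loa (beam-on).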
def _get_coeffs(self):
"""
generate sail-set total lift and drag coefficient.
"""
# lift (Clmax) and parasitic drag (Cd0max)
self.cl = 0.0
self.cd = 0.0
kpp = 0.0
for sail in self.sails:
self.cl += sail.cl(self.awa) * sail.area * sail.bk
self.cd += sail.cd(self.awa) * sail.area * sail.bk
kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp
self.cl /= self.area
self.cd /= self.area
# viscous quadratic parasitic drag and induced drag
        divisor_1 = self.area * self.cl ** 2
        divisor_2 = np.pi * self._heff(self.awa) ** 2
        self.CE = (kpp / divisor_1 if divisor_1 else 0.0) + (self.area / divisor_2 if divisor_2 else 0.0)
# fraction of parasitic drag due to jib
self.fcdj = 0.0
for sail in self.sails:
if sail.type == "jib":
self.fcdj = (
sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)
)
# final lift and drag
self.cd = self.cd * (
self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)
) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)
self.cl = self.flat * self.cl
def _update_windTriangle(self):
"""
find AWS and AWA for a given TWS, TWA and VB
"""
_awa_ = lambda awa: self.vb * np.sin(awa / 180.0 * np.pi) - self.tws * np.sin(
(self.twa - awa) / 180.0 * np.pi
)
self.awa = fsolve(_awa_, self.twa)[0]
self.aws = np.sqrt(
(self.tws * np.sin(self.twa / 180.0 * np.pi)) ** 2
+ (self.tws * np.cos(self.twa / 180.0 * np.pi) + self.vb) ** 2
)
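        # Editor's check (hypothetical numbers): tws = 5 m/s, twa = 60 deg,
        # vb = 3 m/s gives aws = sqrt(4.33**2 + 5.5**2) = 7.0 m/s and
        # awa ~= 38.2 deg -- the apparent wind is stronger and further forward.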
def _area(self):
"""
Fill sail area variable
"""
self.area = 0.0
for sail in self.sails:
self.area += sail.area
def _vce(self):
"""
        Vertical centre of effort, lift/drag weighted
"""
        total = 0.0
        for sail in self.sails:
            cl2 = sail.cl(self.awa) ** 2
            cd2 = sail.cd(self.awa) ** 2
            total += sail.area * sail.vce * sail.bk * np.sqrt(cl2 + cd2)
        self._area()
        deltaCH = 0.0 if not self.sails[1].up else (1 - self.ftj) * 0.05 * self.sails[1].IG
        Zce = total / (self.area * np.sqrt(self.cl ** 2 + self.cd ** 2)) - deltaCH
        return Zce * (1 - 0.203 * (1 - self.flat) - 0.451 * (1 - self.flat) * (1 - self.fractionality))
def phi_up(self):
"""
heel angle correction for AWA and AWS (5.51), this is in Radians!
"""
return 0.5 * (self.phi + 10 * (self.phi / 30.0) ** 2) / 180.0 * np.pi
def _heff(self, awa):
awa = max(0, min(awa, 90))
if self.up:
cheff = self.eff_span_corr * self.kheff(awa)
else:
cheff = 1.0 / self.b * self.reef * self.heff_height_max_spi
return (self.b + self.HBI) * cheff
#
# -- utility functions
#
def debbug(self):
for sail in self.yacht.sails:
sail.debbug_coeffs()
flat = np.linspace(0, 1, 64)
awa = np.linspace(0, 90, 64)
res1 = np.empty_like(flat)
res2 = np.empty_like(awa)
for i in range(64):
res1[i] = self.fcdmult(flat[i])
res2[i] = self.kheff(awa[i])
plt.plot(flat, res1)
plt.show()
plt.plot(awa, res2)
plt.show()
def print_state(self):
self.update(self.vb, self.phi, self.tws, self.twa, self.twa)
print("AeroMod state:")
print(" TWA is : %.2f (deg)" % self.twa)
print(" TWS is : %.2f (m/s)" % self.tws)
print(" AWA is : %.2f (deg)" % self.awa)
print(" AWS is : %.2f (m/s)" % self.aws)
print(" Vb is : %.2f (m/s)" % self.vb)
print(" Heel is : %.2f (deg)" % self.phi)
print(" Drive is: %.2f (N)" % self.Fx)
print(" SSF is : %.2f (N)" % self.Fy)
print(" HM is : %.2f (Nm)" % self.Mx)
print(" Cl is : %.2f (-)" % self.cl)
print(" Cd is : %.2f (-)" % self.cd)
print(" Flat is : %.2f (-)" % self.flat)
print(" Sail area:")
for sail in self.sails:
print(" - " + sail.type + " : %.2f (m^2)" % sail.area)
# if __name__ == "__main__":
# aero = AeroMod(sails=[Main(24.5, 5.5),
# Jib(17.3, 4.4)])
# aero.debbug()
# aero.print_state()
| 31.969811 | 114 | 0.522781 |