Dataset schema (29 columns; ⌀ marks nullable columns). Each record below lists its metadata, then the file content, then the derived line statistics.

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (⌀) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (⌀) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (⌀) | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (⌀) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (⌀) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (⌀) | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (⌀) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (⌀) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (⌀) | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
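For orientation only, here is a minimal sketch of how rows with this schema could be loaded and filtered. It assumes the dump has been exported to a JSON Lines file; the file name `python_files.jsonl`, the helper names, and the filter thresholds are illustrative assumptions, not part of the dump.

```python
# Minimal sketch (assumed setup): read one JSON object per line with the columns listed above
# and keep rows that pass a few illustrative quality filters.
import json


def iter_rows(path="python_files.jsonl"):
    """Yield one dict per dataset row from a hypothetical JSON Lines export."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


def keep(row, min_stars=10, max_line_length=500):
    """Illustrative filter over a few of the schema columns."""
    stars = row.get("max_stars_count") or 0  # nullable column (⌀), treat null as 0
    return (
        row.get("lang") == "Python"
        and stars >= min_stars
        and row.get("max_line_length", 0) <= max_line_length
        and 0.25 <= row.get("alphanum_fraction", 0.0) <= 0.95
    )


if __name__ == "__main__":
    kept = [r for r in iter_rows() if keep(r)]
    print(f"kept {len(kept)} rows")
```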
hexsha: 7950095ff831d383e51d9132564bd0e35662127c | size: 22,682 | ext: py | lang: Python
max_stars: path=src/robot/conf/settings.py | repo=ishandutta2007/robotframework | head=f8fd5bf91c649506049e5c21766a47365cef8736 | licenses=["ECL-2.0", "Apache-2.0"] | count=1 | first_event=2018-11-25T20:31:22.000Z | last_event=2018-11-25T20:31:22.000Z
max_issues: path=src/robot/conf/settings.py | repo=ishandutta2007/robotframework | head=f8fd5bf91c649506049e5c21766a47365cef8736 | licenses=["ECL-2.0", "Apache-2.0"] | count=null | first_event=null | last_event=null
max_forks: path=src/robot/conf/settings.py | repo=ishandutta2007/robotframework | head=f8fd5bf91c649506049e5c21766a47365cef8736 | licenses=["ECL-2.0", "Apache-2.0"] | count=1 | first_event=2018-11-25T20:31:26.000Z | last_event=2018-11-25T20:31:26.000Z
content:
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import sys
import time
from robot.errors import DataError, FrameworkError
from robot.output import LOGGER, loggerhelper
from robot.result.keywordremover import KeywordRemover
from robot.result.flattenkeywordmatcher import validate_flatten_keyword
from robot.utils import (abspath, create_destination_directory, escape,
format_time, get_link_path, html_escape, is_list_like,
py2to3, split_args_from_name_or_path)
from .gatherfailed import gather_failed_tests, gather_failed_suites
@py2to3
class _BaseSettings(object):
_cli_opts = {'RPA' : ('rpa', None),
'Name' : ('name', None),
'Doc' : ('doc', None),
'Metadata' : ('metadata', []),
'TestNames' : ('test', []),
'TaskNames' : ('task', []),
'ReRunFailed' : ('rerunfailed', 'NONE'),
'ReRunFailedSuites': ('rerunfailedsuites', 'NONE'),
'SuiteNames' : ('suite', []),
'SetTag' : ('settag', []),
'Include' : ('include', []),
'Exclude' : ('exclude', []),
'Critical' : ('critical', None),
'NonCritical' : ('noncritical', None),
'OutputDir' : ('outputdir', abspath('.')),
'Log' : ('log', 'log.html'),
'Report' : ('report', 'report.html'),
'XUnit' : ('xunit', None),
'SplitLog' : ('splitlog', False),
'TimestampOutputs' : ('timestampoutputs', False),
'LogTitle' : ('logtitle', None),
'ReportTitle' : ('reporttitle', None),
'ReportBackground' : ('reportbackground',
('#9e9', '#9e9', '#f66')),
'SuiteStatLevel' : ('suitestatlevel', -1),
'TagStatInclude' : ('tagstatinclude', []),
'TagStatExclude' : ('tagstatexclude', []),
'TagStatCombine' : ('tagstatcombine', []),
'TagDoc' : ('tagdoc', []),
'TagStatLink' : ('tagstatlink', []),
'RemoveKeywords' : ('removekeywords', []),
'ExpandKeywords' : ('expandkeywords', []),
'FlattenKeywords' : ('flattenkeywords', []),
'PreRebotModifiers': ('prerebotmodifier', []),
'StatusRC' : ('statusrc', True),
'ConsoleColors' : ('consolecolors', 'AUTO'),
'StdOut' : ('stdout', None),
'StdErr' : ('stderr', None),
'XUnitSkipNonCritical' : ('xunitskipnoncritical', False)}
_output_opts = ['Output', 'Log', 'Report', 'XUnit', 'DebugFile']
def __init__(self, options=None, **extra_options):
self.start_timestamp = format_time(time.time(), '', '-', '')
self._opts = {}
self._cli_opts = self._cli_opts.copy()
self._cli_opts.update(self._extra_cli_opts)
self._process_cli_opts(dict(options or {}, **extra_options))
def _process_cli_opts(self, opts):
for name, (cli_name, default) in self._cli_opts.items():
value = opts[cli_name] if cli_name in opts else default
if isinstance(default, list):
# Copy mutable values and support list values as scalars.
value = list(value) if is_list_like(value) else [value]
self[name] = self._process_value(name, value)
self['TestNames'] += self['ReRunFailed'] + self['TaskNames']
self['SuiteNames'] += self['ReRunFailedSuites']
def __setitem__(self, name, value):
if name not in self._cli_opts:
raise KeyError("Non-existing option '%s'." % name)
self._opts[name] = value
def _process_value(self, name, value):
if name == 'ReRunFailed':
return gather_failed_tests(value)
if name == 'ReRunFailedSuites':
return gather_failed_suites(value)
if name == 'LogLevel':
return self._process_log_level(value)
if value == self._get_default_value(name):
return value
if name == 'Doc':
return self._escape_as_data(value)
if name in ['Metadata', 'TagDoc']:
if name == 'Metadata':
value = [self._escape_as_data(v) for v in value]
return [self._process_metadata_or_tagdoc(v) for v in value]
if name in ['Include', 'Exclude']:
return [self._format_tag_patterns(v) for v in value]
if name in self._output_opts and (not value or value.upper() == 'NONE'):
return None
if name == 'OutputDir':
return abspath(value)
if name in ['SuiteStatLevel', 'ConsoleWidth']:
return self._convert_to_positive_integer_or_default(name, value)
if name == 'VariableFiles':
return [split_args_from_name_or_path(item) for item in value]
if name == 'ReportBackground':
return self._process_report_background(value)
if name == 'TagStatCombine':
return [self._process_tag_stat_combine(v) for v in value]
if name == 'TagStatLink':
return [v for v in [self._process_tag_stat_link(v) for v in value] if v]
if name == 'Randomize':
return self._process_randomize_value(value)
if name == 'MaxErrorLines':
return self._process_max_error_lines(value)
if name == 'RemoveKeywords':
self._validate_remove_keywords(value)
if name == 'FlattenKeywords':
self._validate_flatten_keywords(value)
if name == 'ExpandKeywords':
self._validate_expandkeywords(value)
return value
def _escape_as_data(self, value):
return value
def _process_log_level(self, level):
level, visible_level = self._split_log_level(level.upper())
self._opts['VisibleLogLevel'] = visible_level
return level
def _split_log_level(self, level):
if ':' in level:
level, visible_level = level.split(':', 1)
else:
visible_level = level
self._validate_log_level_and_default(level, visible_level)
return level, visible_level
def _validate_log_level_and_default(self, log_level, default):
if log_level not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % log_level)
if default not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % default)
if not loggerhelper.IsLogged(log_level)(default):
raise DataError("Default visible log level '%s' is lower than "
"log level '%s'" % (default, log_level))
def _process_max_error_lines(self, value):
if not value or value.upper() == 'NONE':
return None
value = self._convert_to_integer('maxerrorlines', value)
if value < 10:
raise DataError("Option '--maxerrorlines' expected an integer "
"value greater that 10 but got '%s'." % value)
return value
def _process_randomize_value(self, original):
value = original.lower()
if ':' in value:
value, seed = value.split(':', 1)
else:
seed = random.randint(0, sys.maxsize)
if value in ('test', 'suite'):
value += 's'
if value not in ('tests', 'suites', 'none', 'all'):
self._raise_invalid_option_value('--randomize', original)
try:
seed = int(seed)
except ValueError:
self._raise_invalid_option_value('--randomize', original)
return value, seed
def _raise_invalid_option_value(self, option_name, given_value):
raise DataError("Option '%s' does not support value '%s'."
% (option_name, given_value))
def __getitem__(self, name):
if name not in self._opts:
raise KeyError("Non-existing option '%s'." % name)
if name in self._output_opts:
return self._get_output_file(name)
return self._opts[name]
def _get_output_file(self, option):
"""Returns path of the requested output file and creates needed dirs.
`option` can be 'Output', 'Log', 'Report', 'XUnit' or 'DebugFile'.
"""
name = self._opts[option]
if not name:
return None
if option == 'Log' and self._output_disabled():
self['Log'] = None
LOGGER.error('Log file is not created if output.xml is disabled.')
return None
name = self._process_output_name(option, name)
path = abspath(os.path.join(self['OutputDir'], name))
create_destination_directory(path, '%s file' % option.lower())
return path
def _process_output_name(self, option, name):
base, ext = os.path.splitext(name)
if self['TimestampOutputs']:
base = '%s-%s' % (base, self.start_timestamp)
ext = self._get_output_extension(ext, option)
return base + ext
def _get_output_extension(self, ext, type_):
if ext != '':
return ext
if type_ in ['Output', 'XUnit']:
return '.xml'
if type_ in ['Log', 'Report']:
return '.html'
if type_ == 'DebugFile':
return '.txt'
raise FrameworkError("Invalid output file type: %s" % type_)
def _process_metadata_or_tagdoc(self, value):
if ':' in value:
return value.split(':', 1)
return value, ''
def _process_report_background(self, colors):
if colors.count(':') not in [1, 2]:
raise DataError("Invalid report background colors '%s'." % colors)
colors = colors.split(':')
if len(colors) == 2:
return colors[0], colors[0], colors[1]
return tuple(colors)
def _process_tag_stat_combine(self, pattern):
if ':' in pattern:
pattern, title = pattern.rsplit(':', 1)
else:
title = ''
return self._format_tag_patterns(pattern), title
def _format_tag_patterns(self, pattern):
for search, replace in [('&', 'AND'), ('AND', ' AND '), ('OR', ' OR '),
('NOT', ' NOT '), ('_', ' ')]:
if search in pattern:
pattern = pattern.replace(search, replace)
while ' ' in pattern:
pattern = pattern.replace(' ', ' ')
if pattern.startswith(' NOT'):
pattern = pattern[1:]
return pattern
def _process_tag_stat_link(self, value):
tokens = value.split(':')
if len(tokens) >= 3:
return tokens[0], ':'.join(tokens[1:-1]), tokens[-1]
raise DataError("Invalid format for option '--tagstatlink'. "
"Expected 'tag:link:title' but got '%s'." % value)
def _convert_to_positive_integer_or_default(self, name, value):
value = self._convert_to_integer(name, value)
return value if value > 0 else self._get_default_value(name)
def _convert_to_integer(self, name, value):
try:
return int(value)
except ValueError:
raise DataError("Option '--%s' expected integer value but got '%s'."
% (name.lower(), value))
def _get_default_value(self, name):
return self._cli_opts[name][1]
def _validate_remove_keywords(self, values):
for value in values:
try:
KeywordRemover(value)
except DataError as err:
raise DataError("Invalid value for option '--removekeywords'. %s" % err)
def _validate_flatten_keywords(self, values):
try:
validate_flatten_keyword(values)
except DataError as err:
raise DataError("Invalid value for option '--flattenkeywords'. %s" % err)
def _validate_expandkeywords(self, values):
for opt in values:
if not opt.lower().startswith(('name:', 'tag:')):
raise DataError("Invalid value for option '--expandkeywords'. "
"Expected 'TAG:<pattern>', or "
"'NAME:<pattern>' but got '%s'." % opt)
def __contains__(self, setting):
return setting in self._cli_opts
def __unicode__(self):
return '\n'.join('%s: %s' % (name, self._opts[name])
for name in sorted(self._opts))
@property
def output_directory(self):
return self['OutputDir']
@property
def output(self):
return self['Output']
@property
def log(self):
return self['Log']
@property
def report(self):
return self['Report']
@property
def xunit(self):
return self['XUnit']
@property
def log_level(self):
return self['LogLevel']
@property
def split_log(self):
return self['SplitLog']
@property
def status_rc(self):
return self['StatusRC']
@property
def xunit_skip_noncritical(self):
return self['XUnitSkipNonCritical']
@property
def statistics_config(self):
return {
'suite_stat_level': self['SuiteStatLevel'],
'tag_stat_include': self['TagStatInclude'],
'tag_stat_exclude': self['TagStatExclude'],
'tag_stat_combine': self['TagStatCombine'],
'tag_stat_link': self['TagStatLink'],
'tag_doc': self['TagDoc'],
}
@property
def critical_tags(self):
return self['Critical']
@property
def non_critical_tags(self):
return self['NonCritical']
@property
def remove_keywords(self):
return self['RemoveKeywords']
@property
def flatten_keywords(self):
return self['FlattenKeywords']
@property
def pre_rebot_modifiers(self):
return self['PreRebotModifiers']
@property
def console_colors(self):
return self['ConsoleColors']
@property
def rpa(self):
return self['RPA']
@rpa.setter
def rpa(self, value):
self['RPA'] = value
class RobotSettings(_BaseSettings):
_extra_cli_opts = {'Extension' : ('extension', None),
'Output' : ('output', 'output.xml'),
'LogLevel' : ('loglevel', 'INFO'),
'MaxErrorLines' : ('maxerrorlines', 40),
'DryRun' : ('dryrun', False),
'ExitOnFailure' : ('exitonfailure', False),
'ExitOnError' : ('exitonerror', False),
'SkipTeardownOnExit' : ('skipteardownonexit', False),
'Randomize' : ('randomize', 'NONE'),
'RunEmptySuite' : ('runemptysuite', False),
'Variables' : ('variable', []),
'VariableFiles' : ('variablefile', []),
'PreRunModifiers' : ('prerunmodifier', []),
'Listeners' : ('listener', []),
'ConsoleType' : ('console', 'verbose'),
'ConsoleTypeDotted' : ('dotted', False),
'ConsoleTypeQuiet' : ('quiet', False),
'ConsoleWidth' : ('consolewidth', 78),
'ConsoleMarkers' : ('consolemarkers', 'AUTO'),
'DebugFile' : ('debugfile', None)}
def get_rebot_settings(self):
settings = RebotSettings()
settings.start_timestamp = self.start_timestamp
settings._opts.update(self._opts)
for name in ['Variables', 'VariableFiles', 'Listeners']:
del(settings._opts[name])
for name in ['Include', 'Exclude', 'TestNames', 'SuiteNames', 'Metadata']:
settings._opts[name] = []
for name in ['Name', 'Doc']:
settings._opts[name] = None
settings._opts['Output'] = None
settings._opts['LogLevel'] = 'TRACE'
settings._opts['ProcessEmptySuite'] = self['RunEmptySuite']
settings._opts['ExpandKeywords'] = self['ExpandKeywords']
return settings
def _output_disabled(self):
return self.output is None
def _escape_as_data(self, value):
return escape(value)
@property
def listeners(self):
return self['Listeners']
@property
def debug_file(self):
return self['DebugFile']
@property
def suite_config(self):
return {
'name': self['Name'],
'doc': self['Doc'],
'metadata': dict(self['Metadata']),
'set_tags': self['SetTag'],
'include_tags': self['Include'],
'exclude_tags': self['Exclude'],
'include_suites': self['SuiteNames'],
'include_tests': self['TestNames'],
'empty_suite_ok': self.run_empty_suite,
'randomize_suites': self.randomize_suites,
'randomize_tests': self.randomize_tests,
'randomize_seed': self.randomize_seed,
}
@property
def randomize_seed(self):
return self['Randomize'][1]
@property
def randomize_suites(self):
return self['Randomize'][0] in ('suites', 'all')
@property
def randomize_tests(self):
return self['Randomize'][0] in ('tests', 'all')
@property
def dry_run(self):
return self['DryRun']
@property
def exit_on_failure(self):
return self['ExitOnFailure']
@property
def exit_on_error(self):
return self['ExitOnError']
@property
def skip_teardown_on_exit(self):
return self['SkipTeardownOnExit']
@property
def console_output_config(self):
return {
'type': self.console_type,
'width': self.console_width,
'colors': self.console_colors,
'markers': self.console_markers,
'stdout': self['StdOut'],
'stderr': self['StdErr']
}
@property
def console_type(self):
if self['ConsoleTypeQuiet']:
return 'quiet'
if self['ConsoleTypeDotted']:
return 'dotted'
return self['ConsoleType']
@property
def console_width(self):
return self['ConsoleWidth']
@property
def console_markers(self):
return self['ConsoleMarkers']
@property
def max_error_lines(self):
return self['MaxErrorLines']
@property
def pre_run_modifiers(self):
return self['PreRunModifiers']
@property
def run_empty_suite(self):
return self['RunEmptySuite']
@property
def variables(self):
return self['Variables']
@property
def variable_files(self):
return self['VariableFiles']
@property
def extension(self):
return self['Extension']
class RebotSettings(_BaseSettings):
_extra_cli_opts = {'Output' : ('output', None),
'LogLevel' : ('loglevel', 'TRACE'),
'ProcessEmptySuite' : ('processemptysuite', False),
'StartTime' : ('starttime', None),
'EndTime' : ('endtime', None),
'Merge' : ('merge', False)}
def _output_disabled(self):
return False
@property
def suite_config(self):
return {
'name': self['Name'],
'doc': self['Doc'],
'metadata': dict(self['Metadata']),
'set_tags': self['SetTag'],
'include_tags': self['Include'],
'exclude_tags': self['Exclude'],
'include_suites': self['SuiteNames'],
'include_tests': self['TestNames'],
'empty_suite_ok': self.process_empty_suite,
'remove_keywords': self.remove_keywords,
'log_level': self['LogLevel'],
'critical_tags': self.critical_tags,
'non_critical_tags': self.non_critical_tags,
'start_time': self['StartTime'],
'end_time': self['EndTime']
}
@property
def log_config(self):
if not self.log:
return {}
return {
'rpa': self.rpa,
'title': html_escape(self['LogTitle'] or ''),
'reportURL': self._url_from_path(self.log, self.report),
'splitLogBase': os.path.basename(os.path.splitext(self.log)[0]),
'defaultLevel': self['VisibleLogLevel']
}
@property
def report_config(self):
if not self.report:
return {}
return {
'rpa': self.rpa,
'title': html_escape(self['ReportTitle'] or ''),
'logURL': self._url_from_path(self.report, self.log),
'background' : self._resolve_background_colors()
}
def _url_from_path(self, source, destination):
if not destination:
return None
return get_link_path(destination, os.path.dirname(source))
def _resolve_background_colors(self):
colors = self['ReportBackground']
return {'pass': colors[0], 'nonCriticalFail': colors[1], 'fail': colors[2]}
@property
def merge(self):
return self['Merge']
@property
def console_output_config(self):
return {
'colors': self.console_colors,
'stdout': self['StdOut'],
'stderr': self['StdErr']
}
@property
def process_empty_suite(self):
return self['ProcessEmptySuite']
@property
def expand_keywords(self):
return self['ExpandKeywords']
avg_line_length: 36.2912 | max_line_length: 88 | alphanum_fraction: 0.554228

hexsha: 795009c9b02f99279956290f6d81cc4bb4aa66e4 | size: 997 | ext: py | lang: Python
max_stars: path=test/AggregateRun.py | repo=takanokage/blocksWorld | head=3ad99531c60551e2c51a7083891c724d35148686 | licenses=["MIT"] | count=1 | first_event=2017-08-09T21:23:28.000Z | last_event=2017-08-09T21:23:28.000Z
max_issues: path=test/AggregateRun.py | repo=takanokage/BlocksWorld | head=3ad99531c60551e2c51a7083891c724d35148686 | licenses=["MIT"] | count=4 | first_event=2018-10-10T16:09:31.000Z | last_event=2020-07-12T23:18:44.000Z
max_forks: path=test/AggregateRun.py | repo=takanokage/BlocksWorld | head=3ad99531c60551e2c51a7083891c724d35148686 | licenses=["MIT"] | count=8 | first_event=2018-09-05T21:16:52.000Z | last_event=2019-07-02T05:27:31.000Z
content:
from BuildDataSet import *
#
# load the data set
Set1 = CreateDataSet()
# show the aggregate object before applying any actions
Set1.DisplayImage(saveImage='True',showImage='True')
# apply rotation by -30 degrees (minus means clockwise) to the first aggregate object and by 10 degrees to the second,
# in case there is more than one aggregate object.
Set1.AggregateRotation([-30,10])
# Update the aggregate object parameters
Set1.UpdateVertices()
Set1.UpdateCenters()
# apply translation
Set1.AggregateTranslate([50,10])
# Update the aggregate object parameters
Set1.UpdateVertices()
Set1.UpdateCenters()
# apply scaling with the given factors (0.5 for the first aggregate object, 1 for the second).
Set1.AggregateScaling([0.5,1])
# Update the aggregate object parameters
Set1.UpdateVertices()
Set1.UpdateCenters()
# finally choose to save the data set to an image, show it, or both.
Set1.DisplayImage(saveImage='True',showImage='True')
# At the end save the whole data set into a JSON file so we can replicate the exact data set again.
Set1.SaveData('Set1.json')
avg_line_length: 35.607143 | max_line_length: 114 | alphanum_fraction: 0.782347

hexsha: 79500a144c18fc61e43eeb7b685fd5ee0777ed82 | size: 6,214 | ext: py | lang: Python
max_stars: path=python/datamongo/collections/supply_dimensions.py | repo=jiportilla/ontology | head=8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_issues: path=python/datamongo/collections/supply_dimensions.py | repo=jiportilla/ontology | head=8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=python/datamongo/collections/supply_dimensions.py | repo=jiportilla/ontology | head=8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pandas import DataFrame
from base import BaseObject
from datamongo.collections import Employees
from datamongo.collections import BandRegion
from datamongo.core.bp import CendantCollection
from datamongo.core.dmo import BaseMongoClient
from datamongo.core.dmo import BaseMongoHelper
class SupplyDimensions(BaseObject):
""" Collection Wrapper over MongoDB Collection
for "supply_dimensions" """
_records = None
def __init__(self,
some_base_client=None):
"""
Created:
17-Apr-2019
craig.trim@ibm.com
"""
BaseObject.__init__(self, __name__)
if not some_base_client:
some_base_client = BaseMongoClient()
self.cendant_collection = CendantCollection(some_base_client=some_base_client,
some_db_name="cendant",
some_collection_name="supply_dimensions")
self.base_client = some_base_client
self.mongo_collection = self.cendant_collection.collection
self.helper = BaseMongoHelper(self.mongo_collection)
self.band_region = BandRegion(self.base_client)
self._employees = Employees(self.base_client)
def all(self) -> list:
if not self._records:
self._records = self.cendant_collection.all()
return self._records
def histogram(self) -> DataFrame:
from datamongo.slots.dmo import DimensionFrequency
return DimensionFrequency(self.all()).process(as_dataframe=True)
def value_by_cnum(self):
d_records = {}
for record in self.all():
values = []
for slot in record["slots"]:
values.append(record["slots"][slot])
d_records[record["key_field"]] = values
return d_records
def weight_by_cnum(self):
d_records = {}
for record in self.all():
values = []
for slot in record["slots"]:
values.append(record["slots"][slot]["weight"])
d_records[record["key_field"]] = values
return d_records
def _dim_values_by_keyed_param(self,
d_cnum_by_param: dict) -> dict:
"""
:param d_cnum_by_param:
e.g. a dictionary of CNUMs keyed by Region or Band
these dictionaries are generated from BandRegion
:return:
a dictionary
"""
d_value_by_cnum = self.value_by_cnum()
d_param_values = {}
for param in d_cnum_by_param:
param_values = []
for cnum in d_cnum_by_param[param]:
if cnum in d_value_by_cnum:
param_values.append(d_value_by_cnum[cnum])
d_param_values[param] = param_values
return d_param_values
def dim_values_by_region(self) -> dict:
"""
:return:
a dictionary keyed by region
with lists of dimension values
sample output:
{ 'AP': [ 6.00, 13.5, 0.00, 18.0, 10.5, 0.00, 10.5, 4.25 ],
[ 1.00, 1.75, 1.00, 9.00, 14.5, 4.00, 14.5, 1.00 ],
...
[ 1.50, 4.50, 0.00, 4.00, 1.00, 1.00, 3.50, 0.00 ]}
"""
d = self.band_region.region_by_cnum(reverse=True)
return self._dim_values_by_keyed_param(d)
def dim_values_by_band(self) -> dict:
"""
:return:
a dictionary keyed by band
with lists of dimension values
sample output:
{ '07': [ 6.00, 13.5, 0.00, 18.0, 10.5, 0.00, 10.5, 4.25 ],
[ 1.00, 1.75, 1.00, 9.00, 14.5, 4.00, 14.5, 1.00 ],
...
[ 1.50, 4.50, 0.00, 4.00, 1.00, 1.00, 3.50, 0.00 ]}
"""
d = self.band_region.band_by_cnum(reverse=True)
return self._dim_values_by_keyed_param(d)
def dim_values_by_region_and_band(self) -> dict:
"""
:return:
a dictionary keyed by band
with lists of dimension values
sample output:
{ '07': [ 6.00, 13.5, 0.00, 18.0, 10.5, 0.00, 10.5, 4.25 ],
[ 1.00, 1.75, 1.00, 9.00, 14.5, 4.00, 14.5, 1.00 ],
...
[ 1.50, 4.50, 0.00, 4.00, 1.00, 1.00, 3.50, 0.00 ]}
"""
d_cnum_by_region = self.band_region.region_by_cnum(reverse=True)
d_band_by_cnum = self.band_region.band_by_cnum(reverse=False)
d_value_by_cnum = self.value_by_cnum()
d_region = {}
for region in d_cnum_by_region:
if region not in d_region:
d_region[region] = {}
for cnum in d_cnum_by_region[region]:
if cnum not in d_value_by_cnum:
continue
band = d_band_by_cnum[cnum]
if band not in d_region[region]:
d_region[region][band] = []
d_region[region][band].append(d_value_by_cnum[cnum])
return d_region
def by_value_sum(self,
minimum_value_sum: int = None,
maximum_value_sum: int = None,
key_fields_only: bool = False) -> list:
from datamongo.slots.dmo import SlotValueFilter
return SlotValueFilter(some_records=self.all()).process(minimum_value_sum=minimum_value_sum,
maximum_value_sum=maximum_value_sum,
key_fields_only=key_fields_only)
def reverse_index(self,
slot_name: str) -> DataFrame:
from datamongo.slots.dmo import ReverseSlotIndex
return ReverseSlotIndex(some_records=self.all(),
some_slot_name=slot_name).process(sort_ascending=True)
if __name__=="__main__":
r = SupplyDimensions().by_value_sum(100, 0, True)
print (len(r))
avg_line_length: 34.715084 | max_line_length: 100 | alphanum_fraction: 0.540232

hexsha: 79500a21eec48f52921c1ef99795a9403445eb65 | size: 1,353 | ext: py | lang: Python
max_stars: path=widgets/menubar.py | repo=ostash-group/GeneticPlotter | head=09389407c6d89b8f525f247e7a8af6ff0de1d665 | licenses=["MIT"] | count=3 | first_event=2021-02-02T18:10:18.000Z | last_event=2021-07-27T14:20:20.000Z
max_issues: path=widgets/menubar.py | repo=ostash-group/GeneticPlotter | head=09389407c6d89b8f525f247e7a8af6ff0de1d665 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=widgets/menubar.py | repo=ostash-group/GeneticPlotter | head=09389407c6d89b8f525f247e7a8af6ff0de1d665 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QWidget
from PyQt5.QtWidgets import (QTextEdit, QPushButton, QMessageBox, QHBoxLayout,
QAction, QLabel, QMenuBar, QAction, QListWidget,
QStyleFactory)
from PyQt5.QtGui import QKeySequence, QIcon
from PyQt5.QtCore import Qt,QFile,QTextStream, QDir,QFileInfo
# ToolBar
class MenuBar(QMenuBar):
def __init__(self):
super().__init__()
self.currentFile = "No opened files found"
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.init_ui()
def init_ui(self):
self.fileMenu = self.addMenu("File")
self.plotMenu = self.addMenu("Plot")
self.helpMenu = self.addMenu("Help")
self.helpMenu.addAction(self.aboutAct)
def about(self):
QMessageBox.about(self, "About Application",
""" <h1>Genetic Plotter</h1>This application has comfortable\r
and friendly (easy-to-use) graphical user interface.\n
Genetic Plotter allows to conduct some types of\n
genetic data analysis and to visualize the result.""")
avg_line_length: 27.06 | max_line_length: 79 | alphanum_fraction: 0.593496

hexsha: 79500a820e0789a4b3dbbcd5b155b0ec3c63edf4 | size: 5,968 | ext: py | lang: Python
max_stars: path=src/tests/ftest/control/dmg_telemetry_io_basic.py | repo=fedepad/daos | head=ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | licenses=["BSD-2-Clause-Patent"] | count=429 | first_event=2016-09-28T20:43:20.000Z | last_event=2022-03-25T01:22:50.000Z
max_issues: path=src/tests/ftest/control/dmg_telemetry_io_basic.py | repo=fedepad/daos | head=ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | licenses=["BSD-2-Clause-Patent"] | count=6,341 | first_event=2016-11-24T12:34:26.000Z | last_event=2022-03-31T23:53:46.000Z
max_forks: path=src/tests/ftest/control/dmg_telemetry_io_basic.py | repo=fedepad/daos | head=ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | licenses=["BSD-2-Clause-Patent"] | count=202 | first_event=2016-10-30T14:47:53.000Z | last_event=2022-03-30T21:29:11.000Z
content:
#!/usr/bin/python
"""
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from avocado.core.exceptions import TestFail
from ior_test_base import IorTestBase
from telemetry_test_base import TestWithTelemetry
from telemetry_utils import TelemetryUtils
class TestWithTelemetryIOBasic(IorTestBase,TestWithTelemetry):
# pylint: disable=too-many-ancestors
# pylint: disable=too-many-nested-blocks
"""Test telemetry engine io basic metrics.
:avocado: recursive
"""
def verify_io_test_metrics(self, io_test_metrics, metrics_data, threshold):
""" Verify telemetry io metrics from metrics_data.
Args:
io_test_metrics (list): list of telemetry io metrics.
metrics_data (dict): a dictionary of host keys linked to a
list of io metric names.
threshold (int): test io metrics threshold.
"""
status = True
for name in sorted(io_test_metrics):
self.log.info(" --telemetry metric: %s", name)
self.log.info(
" %-9s %-12s %-4s %-6s %-6s %s",
"TestLoop", "Host", "Rank", "Target", "Size", "Value")
for key in sorted(metrics_data):
m_data = metrics_data[key]
if key == 0:
testloop = "Initial"
else:
testloop = str(key)
for host in sorted(m_data[name]):
for rank in sorted(m_data[name][host]):
for target in sorted(m_data[name][host][rank]):
for size in sorted(
m_data[name][host][rank][target]):
value = m_data[name][host][rank][target][size]
invalid = ""
#Verify value within range
if (value < threshold[0]
or value >= threshold[1]):
status = False
invalid = "*out of valid range"
#Verify if min < max
if "_min" in name:
name2 = name.replace("_min", "_max")
if value > m_data\
[name2][host][rank][target][size]:
status = False
invalid += " *_min > _max"
#Verify if value decremental
if ("_min" in name or \
"_max" in name) and key > 0:
if value < metrics_data[key-1]\
[name][host][rank][target][size]:
status = False
invalid += " *value decreased"
self.log.info(
" %-9s %-12s %-4s %-6s %-6s %s %s",
testloop, host, rank, target, size, value,
invalid)
if not status:
self.fail("##Telemetry test io metrics verification failed.")
def display_io_test_metrics(self, metrics_data):
""" Display metrics_data.
Args:
metrics_data (dict): a dictionary of host keys linked to a
list of io metric names.
"""
for key in sorted(metrics_data):
self.log.info(
"\n %12s: %s",
"Initial " if key == 0 else "Test Loop {}".format(key),
metrics_data[key])
def test_io_telmetry_metrics_basic(self):
"""JIRA ID: DAOS-5241
Create files of 500M and 1M with transfer size 1M to verify the
DAOS engine IO telemetry basic metrics infrastructure.
:avocado: tags=all,pr,daily_regression
:avocado: tags=vm
:avocado: tags=control,telemetry
:avocado: tags=test_with_telemetry_basic,test_io_telemetry
:avocado: tags=test_io_telemetry_basic
"""
block_sizes = self.params.get("block_sizes", "/run/*")
transfer_sizes = self.params.get("transfer_sizes", "/run/*")
threshold = self.params.get("io_test_metrics_valid", "/run/*")
test_metrics = TelemetryUtils.ENGINE_IO_DTX_COMMITTED_METRICS +\
TelemetryUtils.ENGINE_IO_OPS_FETCH_ACTIVE_METRICS +\
TelemetryUtils.ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS
i = 0
self.add_pool(connect=False)
self.add_container(pool=self.pool)
metrics_data = {}
for block_size in block_sizes:
for transfer_size in transfer_sizes:
metrics_data[i] = self.telemetry.get_io_metrics(test_metrics)
i += 1
self.log.info("==Start ior testloop: %s, Block Size = %s, "
"transfer_size = %s", i, block_size,
transfer_size)
self.ior_cmd.block_size.update(block_size)
self.ior_cmd.transfer_size.update(transfer_size)
test_file_suffix = "_{}".format(i)
# Run ior command.
try:
self.run_ior_with_pool(
timeout=200, create_pool=False, create_cont=False,
test_file_suffix=test_file_suffix)
except TestFail:
self.log.info("#ior command failed!")
metrics_data[i] = self.telemetry.get_io_metrics(test_metrics)
self.display_io_test_metrics(metrics_data)
self.verify_io_test_metrics(test_metrics, metrics_data, threshold)
self.log.info("------Test passed------")
avg_line_length: 44.207407 | max_line_length: 79 | alphanum_fraction: 0.500168

hexsha: 79500bed84e7dd864d0f7b76da1e61545a318733 | size: 152 | ext: py | lang: Python
max_stars: path=setup.py | repo=sevdog/django-reverse-js | head=0f42aa956d710449bba91d35c6b3b7e5040acbbf | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_issues: path=setup.py | repo=sevdog/django-reverse-js | head=0f42aa956d710449bba91d35c6b3b7e5040acbbf | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=setup.py | repo=sevdog/django-reverse-js | head=0f42aa956d710449bba91d35c6b3b7e5040acbbf | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
#!/usr/bin/env python
from setuptools import setup
setup(
package_data={
'django_reverse_js': ['templates/django_reverse_js/*.js']
}
)
avg_line_length: 16.888889 | max_line_length: 65 | alphanum_fraction: 0.671053

hexsha: 79500c01d96c2af833699e9bee66ee827ef01dc6 | size: 3,503 | ext: py | lang: Python
max_stars: path=source/SC_Event_Service/app.py | repo=red-hat-data-services/smart-city-with-rh-cloud-services | head=29b29501214a69ccb939db1d2196c89d9b35132b | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_issues: path=source/SC_Event_Service/app.py | repo=red-hat-data-services/smart-city-with-rh-cloud-services | head=29b29501214a69ccb939db1d2196c89d9b35132b | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=source/SC_Event_Service/app.py | repo=red-hat-data-services/smart-city-with-rh-cloud-services | head=29b29501214a69ccb939db1d2196c89d9b35132b | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
from sqlalchemy import create_engine, Column, Integer, String, Numeric, DateTime, func, Boolean
from sqlalchemy.ext.declarative import declarative_base
from aiokafka import AIOKafkaConsumer
import asyncio, os, ast , sys
import nest_asyncio
nest_asyncio.apply()
## global variable :: setting this for kafka Consumer
KAFKA_ENDPOINT = os.getenv('KAFKA_ENDPOINT', 'localhost:9092')
KAFKA_TOPIC = os.getenv('KAFKA_TOPIC', 'lpr')
KAFKA_CONSUMER_GROUP_ID = os.getenv('KAFKA_CONSUMER_GROUP_ID', 'event_consumer_group')
loop = asyncio.get_event_loop()
## Database details and connection
DB_USER = os.getenv('DB_USER', 'dbadmin')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'HT@1202k')
DB_HOST = os.getenv('DB_HOST', '127.0.0.1')
DB_NAME = os.getenv('DB_NAME','pgdb')
TABLE_NAME = os.getenv('TABLE_NAME','event')
Base = declarative_base()
class Event(Base):
__tablename__ = "event"
event_id = Column(String, primary_key=True, index=True)
event_timestamp = Column('date', DateTime(timezone=True), default=func.now())
event_vehicle_detected_plate_number = Column(String, index=True)
event_vehicle_lpn_detection_status = Column(String)
stationa1 = Column(Boolean, unique=False)
stationa5201 = Column(Boolean, unique=False)
stationa13 = Column(Boolean, unique=False)
stationa2 = Column(Boolean, unique=False)
stationa23 = Column(Boolean, unique=False)
stationb313 = Column(Boolean, unique=False)
stationa4202 = Column(Boolean, unique=False)
stationa41 = Column(Boolean, unique=False)
stationb504 = Column(Boolean, unique=False)
async def consume():
engine = create_engine('postgresql://'+DB_USER+':'+DB_PASSWORD+'@'+DB_HOST+'/'+DB_NAME+'?tcp_user_timeout=3000&connect_timeout=10', pool_pre_ping=True, connect_args={})
connection = engine.connect()
kafkaConsumer = AIOKafkaConsumer(KAFKA_TOPIC, loop=loop, bootstrap_servers=KAFKA_ENDPOINT, group_id=KAFKA_CONSUMER_GROUP_ID)
## Create Table if does not exists
Event.__table__.create(bind=engine, checkfirst=True)
await kafkaConsumer.start()
try:
async for msg in kafkaConsumer:
print(msg.key)
message = msg.value
payload=ast.literal_eval(message.decode('utf-8'))
try:
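# Build the INSERT for the event table by interpolating the decoded payload fields directly into the SQL string, then execute it.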
connection.execute(f"""INSERT INTO public.{TABLE_NAME}(event_id, date, event_vehicle_detected_plate_number, event_vehicle_lpn_detection_status, "stationa1", "stationa5201", "stationa13", "stationa2", "stationa23", "stationb313", "stationa4202"
, "stationa41", "stationb504" ) VALUES('{payload['event_id']}', '{payload['event_timestamp']}', '{payload['event_vehicle_detected_plate_number']}', '{payload['event_vehicle_lpn_detection_status']}', '{payload['stationa1']}', '{payload['stationa5201']}', '{payload['stationa13']}', '{payload['stationa2']}', '{payload['stationa23']}', '{payload['stationb313']}', '{payload['stationa4202']}', '{payload['stationa41']}', '{payload['stationb504']}'
)""")
print("===============================================")
print(payload)
print("Message written to DB successfully")
print("===============================================")
except Exception as e:
print(e)
print("Exiting ....")
sys.exit(1)
except Exception as e:
print(e.message)
print("Exiting ....")
sys.exit(1)
finally:
await kafkaConsumer.stop()
loop.run_until_complete(consume())
avg_line_length: 48.652778 | max_line_length: 444 | alphanum_fraction: 0.675136

hexsha: 79500c370c9a169dd82b0c34a9fa63b5c61b195d | size: 1,657 | ext: py | lang: Python
max_stars: path=tests/test_06_normaliseip.py | repo=pcollinson/nftfw | head=c4c8aaf8f2aa7439848fd1c209d64b3d2991f1b7 | licenses=["MIT"] | count=14 | first_event=2020-07-10T05:38:21.000Z | last_event=2022-03-06T23:42:08.000Z
max_issues: path=tests/test_06_normaliseip.py | repo=pcollinson/nftfw | head=c4c8aaf8f2aa7439848fd1c209d64b3d2991f1b7 | licenses=["MIT"] | count=4 | first_event=2020-07-09T03:00:05.000Z | last_event=2020-07-11T14:40:40.000Z
max_forks: path=tests/test_06_normaliseip.py | repo=pcollinson/nftfw | head=c4c8aaf8f2aa7439848fd1c209d64b3d2991f1b7 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
""" Test normalise address function """
from pathlib import Path
import pytest
from nftfw.normaliseaddress import NormaliseAddress
from nftfw.whitelistcheck import WhiteListCheck
from .configsetup import config_init
@pytest.fixture
def cf(): # pylint: disable=invalid-name
""" Get config from configsetup """
_cf = config_init()
return _cf
@pytest.fixture
def norm(cf):
""" Get Normalise address class """
_na = NormaliseAddress(cf, 'test package')
return _na
@pytest.fixture
def normwhite(cf):
""" Get Normalise address class with the iswhite hook enabled """
_na = NormaliseAddress(cf, 'test package')
_wh = WhiteListCheck(cf)
cf.is_white_fn = _wh.is_white
return _na
def test_basic(norm):
""" Test basic normalisation """
iplist = ('192.0.2.5', '198.51.100.128',
'198.51.100.5', '2001:db8:fab::/64',
'203.0.113.7')
for ip in iplist:
res = norm.normal(ip)
assert res == ip
def test_white(cf, normwhite):
""" Test whitelist option """
path = Path('data/whitelist.d/198.51.100.254')
assert path.exists()
res = normwhite.normal('198.51.100.254', cf.is_white_fn)
assert res is None
def test_networknorm(norm):
""" Test IP normalisation """
ip = '2001:db8:fab::677'
res = norm.normal(ip)
assert res == '2001:db8:fab::/64'
ip = '198.51.100.30/24'
res = norm.normal(ip)
assert res == '198.51.100.0/24'
def test_bad(norm):
""" Test bad addresses """
iplist = ('192.0.2', '192.0.2-255', '2001:db8:fab')
for ip in iplist:
res = norm.normal(ip)
assert res is None
avg_line_length: 24.014493 | max_line_length: 69 | alphanum_fraction: 0.629451

hexsha: 79500ce693e8c292c4818f7baf0c05445b5667cb | size: 9,072 | ext: py | lang: Python
max_stars: path=double_dummy/algorithms.py | repo=andCelli/efg_tools | head=0de50f41e971d6ebb461ae4733843096af87e014 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_issues: path=double_dummy/algorithms.py | repo=andCelli/efg_tools | head=0de50f41e971d6ebb461ae4733843096af87e014 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=double_dummy/algorithms.py | repo=andCelli/efg_tools | head=0de50f41e971d6ebb461ae4733843096af87e014 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
"""
For any particular deal, given the declarer and the trump suit (or notrump), the double dummy result is the number of
tricks the declarer will win when all four players can see all 52 cards, and each player always plays to his or her best
advantage.
A complete DDA consists of 20 results, calculated by considering, in turn, each of the four players as declarer playing
in each of the five strains (four possible trump suits plus notrump).
******
Given declarer + bid, how many tricks can his team win?
"""
from misc.game_structures import *
import double_dummy.game.game_state as gs
from double_dummy.tree_exploration import ab_search
import time
import statistics
from typing import Dict
import random
import sys
ERROR_WRONG_NUM_OF_PLAYERS = "The number of players is not compatible with the number of hands given"
ERROR_DIFFERENT_HAND_SIZES = "The hands do not contain the same number of cards"
ERROR_WRONG_SET_OF_CARDS = "The cards do not make a legal deck of cards, please use all cards from A to the max rank for " \
"each suit"
ERROR_INCOMPATIBLE_RANKS = "The number of ranks is incompatible with the number of players, change the deck size so that " \
"each player gets the same number of cards"
ERROR_DUPLICATE_CARDS = "There are duplicates among the declarer's cards"
ERROR_ILLEGAL_RANK = "There are cards that exceed the maximum rank"
ERROR_ILLEGAL_HAND_OWNER = "You must specify a hand owner when giving only one player hand (use parameter hand_owner)"
_DEBUG = True
class DDAMatrix:
def __init__(self, n_players):
self._mat = {}
# init values to -1
for pid in range(n_players):
for suit in Suit:
self._mat[pid, suit.to_char()] = -1
def __setitem__(self, key, value):
self._mat[key] = value
def __getitem__(self, item):
return self._mat[item]
def __str__(self):
return str([f"{PID_TO_CARDINAL[key[0]]},{key[1]}:{self._mat[key]}" for key in self._mat.keys()])
def _check_hand_list(hands: Dict[PlayerId, List[Card]]):
"""
Performs basic checks on a list of hands. Throws errors if problems are found.
"""
# check all hands contain the same number of cards
# check that all cards are present
ranks = len(hands[0])
deck = Deck(ranks)
cards = []
for hand in hands.values():
assert len(hand) == ranks, ERROR_DIFFERENT_HAND_SIZES
cards += hand
assert set(deck.cards) == set(cards), ERROR_WRONG_SET_OF_CARDS
def _check_hand_declarer(hand: List[Card], ranks):
# all cards must be different
hand_set = set(hand)
assert len(hand) == len(hand_set), ERROR_DUPLICATE_CARDS
# all cards must have lower rank than 'ranks'
for card in hand:
assert card.rank <= ranks, ERROR_ILLEGAL_RANK
def _generate_game(hands, n_players: int, declarer: PlayerId, hand_owner: PlayerId, trump: str):
"""
Generate the game given the following parameters:
:param hands: the hands of the players. When originally given as strings, each hand needs to be of the format
'rank:suit,rank:suit,...', where each hand is separated by a forward slash /.
Spaces are allowed. The list can either contain the hands of all the players OR the hand
of the declarer. In the latter case, the other hands will be randomly generated. The length of the hands represents
the ranks of the cards.
:param n_players: number of players.
:param declarer: id of the declarer.
:param hand_owner: id of the owner of the hand, in case only one hand is passed to the algorithm. Can be different
from the declarer.
:param trump: str containing the trump. It can be 'c', 'd', 'h', 's' or 'n' (no trump).
:return: generated GameState object and hands used (includes sampled hands).
"""
assert 2 <= n_players <= 4
assert len(hands) == 1 or len(hands) == n_players, ERROR_WRONG_NUM_OF_PLAYERS
if len(hands) == 1:
# need to sample hands
aux_hands = {}
aux_hands[hand_owner] = hands[0]
ranks = n_players*len(aux_hands[hand_owner])/4
assert ranks*4 % n_players == 0 and ranks.is_integer(), ERROR_INCOMPATIBLE_RANKS
_check_hand_declarer(aux_hands[hand_owner], ranks)
deck = Deck(int(ranks))
available_cards = set(deck.cards) - set(aux_hands[hand_owner])
for cur_player in range(n_players):
if cur_player != hand_owner:
aux_hands[cur_player] = []
# draw cards
for j in range(len(aux_hands[hand_owner])):
card = random.sample(available_cards, 1)[0]
aux_hands[cur_player].append(card)
available_cards.remove(card)
hands = aux_hands
if _DEBUG:
for cur_player in sorted(hands.keys()):
print(PID_TO_CARDINAL[cur_player], [card.short_string() for card in hands[cur_player]])
else:
_check_hand_list(hands)
ranks = len(hands[0])
teams = {0: Team(0), 1: Team(1)}
for j in range(n_players):
teams[j % 2].add_member(j)
suits = {
'c': Suit.clubs,
'd': Suit.diamonds,
'h': Suit.hearts,
's': Suit.spades,
'n': Suit.notrump
}
return gs.GameState(n_players, hands, ranks, suits[trump], declarer,
teams[declarer % 2].get_other_member(declarer)), hands
def dda_simple(hands, n_players: int, trump: str, declarer: PlayerId, hand_owner=-1, times=1, debug=True):
"""
Run alpha-beta search algorithm on a single game, given the bid, the declarer and the hand of the declarer or all
the hands.
:param hands
:param n_players: number of players.
:param declarer: id of the declarer.
:param hand_owner: id of the owner of the hand, in case only one hand is passed to the algorithm. Can be different
from the declarer.
:param trump: str containing the trump. It can be 'c', 'd', 'h', 's' or 'n' (no trump).
:param times: number of analyses to perform. To be used in case of sampling of hands. If hands has more than
one item, it is considered a representation of all the hands, therefore times will be reset to 1 anyway.
:return: value of the DDA analysis.
"""
if len(hands) != 1:
times = 1
else:
assert 0 <= hand_owner < n_players, ERROR_ILLEGAL_HAND_OWNER
result_array = []
if debug:
print("Processing...")
for i in range(times):
if debug:
print(f"Game {i}")
game, _ = _generate_game(hands, n_players, declarer, hand_owner, trump)
result_array.append(ab_search(game))
result = statistics.mean(result_array)
if debug:
print(f"DDA analysis completed. The value is: {result}")
return result
def dda_extended(hands, n_players: int, hand_owner=-1, times=1):
"""
Run alpha-beta search algorithm on multiple games. All games are analysed using the same hands, while each game has
a different declarer-trump combination.
:param hands:
:param n_players: number of players.
:param hand_owner: id of the owner of the hand, in case only one hand is passed to the algorithm. Can be different
from the declarer.
:param times: number of analyses to perform. To be used in case of sampling of hands. If hands has more than
one item, it is considered a representation of all the hands, therefore times will be reset to 1 anyway.
:return: DDAMatrix object containing the results. DDAMatrix is a dictionary where the keys are tuples made of the
declarer id (0, 1, 2 or 3, if there are 4 players) and the trump ('c', 'd', 'h', 's' or 'n').
"""
result_mat = DDAMatrix(n_players)
new_hands = None
if len(hands) != 1:
# if no sampling is needed, then it's useless to do multiple runs
times = 1
else:
assert 0 <= hand_owner < n_players, ERROR_ILLEGAL_HAND_OWNER
print("Processing...")
for i in range(times):
print(f"Game {i}")
for declarer in range(n_players):
for trump in Suit:
game, new_hands = _generate_game(hands if new_hands is None else new_hands,
n_players, declarer, hand_owner, trump.to_char())
result = ab_search(game)
old = result_mat[declarer, trump.to_char()]
result_mat[declarer, trump.to_char()] = (old*i + result)/(i+1)
# print(f"old mean = {old}, new mean = {result_mat[declarer, trump.to_char()]}, result added = {result}, new count = {i+1}")
new_hands = None
print(f"DDA analysis completed. The values are: {result_mat}")
return result_mat
if __name__ == '__main__':
h1 = "2:s,A:s,3:s"
h4 = "2:s,A:s,3:s/2:c,3:c,A:c/2:d,3:d,A:d/2:h,3:h,A:h"
n = 4
d = 0
t = 's'
dh1 = import_multiple_hands(h1)
dh4 = import_multiple_hands(h4)
dda_simple(dh1, n, t, d, hand_owner=0, times=1)
dda_extended(dh1, n, hand_owner=2, times=20)
avg_line_length: 40.5 | max_line_length: 140 | alphanum_fraction: 0.656856

hexsha: 79500d0d3bd98b4075bac7b9981cdb60fbeb7756 | size: 683 | ext: py | lang: Python
max_stars: path=course_api/migrations/0025_auto_20180517_2230.py | repo=dragonbone81/bobcat-courses-backend | head=d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | licenses=["MIT"] | count=3 | first_event=2018-10-25T12:41:33.000Z | last_event=2019-09-19T19:47:39.000Z
max_issues: path=course_api/migrations/0025_auto_20180517_2230.py | repo=dragonbone81/bobcat-courses-backend | head=d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | licenses=["MIT"] | count=22 | first_event=2018-04-01T02:43:01.000Z | last_event=2022-03-11T23:15:55.000Z
max_forks: path=course_api/migrations/0025_auto_20180517_2230.py | repo=dragonbone81/cse120 | head=d0f98b837f37eb16a89a24ce9bd3f3f0fd52064c | licenses=["MIT"] | count=1 | first_event=2019-09-19T19:48:59.000Z | last_event=2019-09-19T19:48:59.000Z
content:
# Generated by Django 2.0.3 on 2018-05-18 05:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('course_api', '0024_auto_20180517_2200'),
]
operations = [
migrations.AlterField(
model_name='waitlist',
name='course',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='course_api.Course', verbose_name='Course'),
),
migrations.AlterField(
model_name='waitlist',
name='school',
field=models.CharField(max_length=64, verbose_name='School'),
),
]
avg_line_length: 27.32 | max_line_length: 131 | alphanum_fraction: 0.626647

hexsha: 79500ecb1f3a129f0e980c93649abd12d47afcb4 | size: 955 | ext: py | lang: Python
max_stars: path=core/python/utilities.py | repo=MrOerni/3d-chess | head=48172029dcb2ca1ab4efa98cdc59699419b9c450 | licenses=["MIT"] | count=1 | first_event=2016-09-26T14:49:35.000Z | last_event=2016-09-26T14:49:35.000Z
max_issues: path=core/python/utilities.py | repo=MrEbbinghaus/Tredisca | head=48172029dcb2ca1ab4efa98cdc59699419b9c450 | licenses=["MIT"] | count=12 | first_event=2015-09-02T21:31:09.000Z | last_event=2016-02-20T18:39:41.000Z
max_forks: path=core/python/utilities.py | repo=MrOerni/Tredisca | head=48172029dcb2ca1ab4efa98cdc59699419b9c450 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
from enum import Enum
class bColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def add_vector(a, b) -> tuple:
(x0, y0, z0) = a
(x1, y1, z1) = b
return x0 + x1, y0 + y1, z0 + z1
def sub_vector(a, b) -> tuple:
(x0, y0, z0) = a
(x1, y1, z1) = b
return x0 - x1, y0 - y1, z0 - z1
def get_rel_vector(a, b) -> tuple:
(x0, y0, z0) = a
(x1, y1, z1) = b
return x1 - x0, y1 - y0, z1 - z0
def get_base_vector(a) -> tuple:
"""
:param a:
:return:
"""
(x, y, _) = a
return (1 if x > 0 else -1 if x < 0 else 0,
1 if y > 0 else -1 if y < 0 else 0, _)
class InvalidMoveException(Exception):
def __init__(self, message=""):
self.message = "Invalid Move! MSG: " + message
class Color(Enum):
black = "black"
white = "white"
avg_line_length: 18.72549 | max_line_length: 54 | alphanum_fraction: 0.512042

hexsha: 79501003dbc40acc3a9efc48f73fe010c81752b5 | size: 2,201 | ext: py | lang: Python
max_stars: path=yt_dlp/extractor/onefootball.py | repo=frainzy1477/yt-dlp | head=d298d33fe6ce410fcff5d936798486855b48c9ec | licenses=["Unlicense"] | count=2 | first_event=2020-11-23T04:21:19.000Z | last_event=2020-12-27T18:35:24.000Z
max_issues: path=yt_dlp/extractor/onefootball.py | repo=frainzy1477/yt-dlp | head=d298d33fe6ce410fcff5d936798486855b48c9ec | licenses=["Unlicense"] | count=null | first_event=null | last_event=null
max_forks: path=yt_dlp/extractor/onefootball.py | repo=frainzy1477/yt-dlp | head=d298d33fe6ce410fcff5d936798486855b48c9ec | licenses=["Unlicense"] | count=1 | first_event=2021-11-19T06:18:33.000Z | last_event=2021-11-19T06:18:33.000Z
content:
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OneFootballIE(InfoExtractor):
_VALID_URL = r'(?:https?://)(?:www\.)?onefootball\.com/[a-z]{2}/video/[^/&?#]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://onefootball.com/en/video/highlights-fc-zuerich-3-3-fc-basel-34012334',
'info_dict': {
'id': '34012334',
'ext': 'mp4',
'title': 'Highlights: FC Zürich 3-3 FC Basel',
'description': 'md5:33d9855cb790702c4fe42a513700aba8',
'thumbnail': 'https://photobooth-api.onefootball.com/api/screenshot/https:%2F%2Fperegrine-api.onefootball.com%2Fv2%2Fphotobooth%2Fcms%2Fen%2F34012334',
'timestamp': 1635874604,
'upload_date': '20211102'
},
'params': {'skip_download': True}
}, {
'url': 'https://onefootball.com/en/video/klopp-fumes-at-var-decisions-in-west-ham-defeat-34041020',
'info_dict': {
'id': '34041020',
'ext': 'mp4',
'title': 'Klopp fumes at VAR decisions in West Ham defeat',
'description': 'md5:9c50371095a01ad3f63311c73d8f51a5',
'thumbnail': 'https://photobooth-api.onefootball.com/api/screenshot/https:%2F%2Fperegrine-api.onefootball.com%2Fv2%2Fphotobooth%2Fcms%2Fen%2F34041020',
'timestamp': 1636314103,
'upload_date': '20211107'
},
'params': {'skip_download': True}
}]
def _real_extract(self, url):
id = self._match_id(url)
webpage = self._download_webpage(url, id)
data_json = self._search_json_ld(webpage, id)
m3u8_url = self._html_search_regex(r'(https://cdn\.jwplayer\.com/manifests/.+\.m3u8)', webpage, 'm3u8_url')
formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, id)
self._sort_formats(formats)
return {
'id': id,
'title': data_json.get('title'),
'description': data_json.get('description'),
'thumbnail': data_json.get('thumbnail'),
'timestamp': data_json.get('timestamp'),
'formats': formats,
'subtitles': subtitles,
}
avg_line_length: 42.326923 | max_line_length: 163 | alphanum_fraction: 0.600636

hexsha: 795010ea5fa304ce2c7fed7f7d6b5ef9a4b5e20d | size: 2,117 | ext: py | lang: Python
max_stars: path=for-proriv/myfuture/students/migrations/0001_initial.py | repo=DmitryAA/EdVision | head=4a6a5d72eef1a6f2a0522e27666e17fca8e6dd10 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_issues: path=for-proriv/myfuture/students/migrations/0001_initial.py | repo=DmitryAA/EdVision | head=4a6a5d72eef1a6f2a0522e27666e17fca8e6dd10 | licenses=["MIT"] | count=null | first_event=null | last_event=null
max_forks: path=for-proriv/myfuture/students/migrations/0001_initial.py | repo=DmitryAA/EdVision | head=4a6a5d72eef1a6f2a0522e27666e17fca8e6dd10 | licenses=["MIT"] | count=null | first_event=null | last_event=null
content:
# Generated by Django 2.1 on 2019-06-23 00:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_string', models.CharField(max_length=200, verbose_name='Адрес')),
('phone', models.CharField(max_length=30, verbose_name='Телефон')),
('email', models.CharField(max_length=30, verbose_name='email адрес')),
('link', models.CharField(max_length=40, verbose_name='ссылка')),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=70, verbose_name='Название города')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=90, verbose_name='Название региона')),
],
),
migrations.CreateModel(
name='Students',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100, verbose_name='Имя')),
('last_name', models.CharField(max_length=100, verbose_name='Фамилия')),
('patronimic_name', models.CharField(max_length=100, verbose_name='Отчество')),
('birtday', models.DateField(verbose_name='Дата рождения')),
('id_address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='students.Address')),
],
),
]
avg_line_length: 41.509804 | max_line_length: 118 | alphanum_fraction: 0.586207

hexsha: 79501129bfde86305fe1e9dd55bd1c59bf26b1f0 | size: 1,941 | ext: py | lang: Python
max_stars: path=benchmark/startPyquil3325.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | first_event=null | last_event=null
max_issues: path=benchmark/startPyquil3325.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | first_event=null | last_event=null
max_forks: path=benchmark/startPyquil3325.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | first_event=null | last_event=null
content:
# qubit number=4
# total number=44
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=13
prog += H(3) # number=23
prog += CZ(0,3) # number=24
prog += Y(1) # number=37
prog += H(3) # number=25
prog += X(3) # number=18
prog += CNOT(3,1) # number=40
prog += CNOT(0,3) # number=19
prog += CNOT(0,3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=32
prog += H(0) # number=41
prog += CZ(3,0) # number=42
prog += H(0) # number=43
prog += CNOT(3,0) # number=26
prog += Z(3) # number=27
prog += H(0) # number=29
prog += CZ(3,0) # number=30
prog += H(0) # number=31
prog += H(0) # number=33
prog += CZ(3,0) # number=34
prog += H(0) # number=35
prog += H(2) # number=36
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += Y(2) # number=38
prog += Y(2) # number=39
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3325.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
avg_line_length: 25.207792 | max_line_length: 64 | alphanum_fraction: 0.557445

hexsha: 795011b62d9bec89c716c342850b5a0e8e980d06 | size: 3,570 | ext: py | lang: Python
max_stars: path=contrib/tools/python/src/Tools/scripts/db2pickle.py | repo=HeyLey/catboost | head=f472aed90604ebe727537d9d4a37147985e10ec2 | licenses=["Apache-2.0"] | count=6,989 | first_event=2017-07-18T06:23:18.000Z | last_event=2022-03-31T15:58:36.000Z
max_issues: path=python/src/Tools/scripts/db2pickle.py | repo=weiqiangzheng/sl4a | head=d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | licenses=["Apache-2.0"] | count=1,978 | first_event=2017-07-18T09:17:58.000Z | last_event=2022-03-31T14:28:43.000Z
max_forks: path=python/src/Tools/scripts/db2pickle.py | repo=weiqiangzheng/sl4a | head=d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | licenses=["Apache-2.0"] | count=1,228 | first_event=2017-07-18T09:03:13.000Z | last_event=2022-03-29T05:57:40.000Z
content:
#!/usr/bin/env python
"""
Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ]
Convert the database file given on the command line to a pickle
representation. The optional flags indicate the type of the database:
-a - open using anydbm
-b - open as bsddb btree file
-d - open as dbm file
-g - open as gdbm file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for write
access (deleting any existing data). If no pickle file is named, the pickle
output is written to standard output.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm
except ImportError:
dbm = None
try:
import gdbm
except ImportError:
gdbm = None
try:
import anydbm
except ImportError:
anydbm = None
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm",
"gdbm", "anydbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
dbfile = args[0]
pfile = sys.stdout
else:
dbfile = args[0]
try:
pfile = open(args[1], 'wb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[1])
return 1
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("anydbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("gdbm module unavailable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'r')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
for k in db.keys():
        pickle.dump((k, db[k]), pfile, True)    # True selects the binary pickle format
db.close()
pfile.close()
return 0
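# Minimal sketch of reading the output back (the helper name is illustrative):
# main() above pickles each (key, value) pair back to back, so repeated
# pickle.load() calls on the same stream recover them until EOFError is raised.
def load_pickle_stream(fp):
    data = {}
    while True:
        try:
            key, value = pickle.load(fp)
        except EOFError:
            break
        data[key] = value
    return data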
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 26.25
| 76
| 0.529132
|
79501202b58d78eb975da4caa0a4754e5565a08f
| 6,025
|
py
|
Python
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_utils.py
|
austinmw/amazon-sagemaker-examples
|
e215fcb4a11346c3b63bbe50f0a27b76f4bb83cb
|
[
"Apache-2.0"
] | 4
|
2020-06-15T14:07:43.000Z
|
2020-06-28T12:34:13.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_utils.py
|
austinmw/amazon-sagemaker-examples
|
e215fcb4a11346c3b63bbe50f0a27b76f4bb83cb
|
[
"Apache-2.0"
] | 4
|
2020-09-26T01:06:05.000Z
|
2022-02-10T01:48:13.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_utils.py
|
99bcsagar/sagemaker
|
d6df186a527402352a612368879bfccfe0b80d02
|
[
"Apache-2.0"
] | 5
|
2020-06-27T12:15:51.000Z
|
2020-06-28T12:34:14.000Z
|
import pytest
import os
import multiprocessing
import json
import botocore
from markov import utils
from markov.s3_client import SageS3Client
from markov.log_handler.constants import (SIMAPP_EVENT_SYSTEM_ERROR,
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500, EXCEPTION_HANDLER_SYNC_FILE)
@pytest.mark.robomaker
def test_test_internet_connection(aws_region):
"""This function checks the functionality of test_internet_connection
function in markov/utils.py
If an exception is generated, log_and_exit will be called within the
function and the test will fail.
Args:
aws_region (String): AWS_REGION passed from fixture
"""
utils.test_internet_connection(aws_region)
@pytest.mark.robomaker
@pytest.mark.sagemaker
def test_load_model_metadata(s3_bucket, s3_prefix, aws_region, model_metadata_s3_key):
"""This function checks the functionality of load_model_metadata function
in markov/utils.py
The function checks if model_metadata.json file is downloaded into the required directory.
If the function fails, it will generate an exception which will call log_and_exit internally.
Hence the test will fail.
Args:
s3_bucket (String): S3_BUCKET
s3_prefix (String): S3_PREFIX
aws_region (String): AWS_REGION
model_metadata_s3_key (String): MODEL_METADATA_S3_KEY
"""
s3_client = SageS3Client(bucket=s3_bucket, s3_prefix=s3_prefix, aws_region=aws_region)
model_metadata_local_path = 'test_model_metadata.json'
utils.load_model_metadata(s3_client, model_metadata_s3_key, model_metadata_local_path)
assert os.path.isfile(model_metadata_local_path)
# Remove file downloaded
if os.path.isfile(model_metadata_local_path):
os.remove(model_metadata_local_path)
@pytest.mark.robomaker
@pytest.mark.sagemaker
def test_has_current_ckpnt_name(s3_bucket, s3_prefix, aws_region):
"""This function checks the functionality of has_current_ckpnt_name function
in markov/utils.py
<utils.has_current_ckpnt_name> checks if the checkpoint key (.coach_checkpoint) is present in S3
Args:
s3_bucket (String): S3_BUCKET
s3_prefix (String): S3_PREFIX
aws_region (String): AWS_REGION
"""
assert utils.has_current_ckpnt_name(s3_bucket, s3_prefix, aws_region)
@pytest.mark.robomaker
@pytest.mark.sagemaker
@pytest.mark.parametrize("error, expected", [("Exception that doesn't contain any keyword", False),
("Exception that contains keyword checkpoint", True)])
def test_is_error_bad_ckpnt(error, expected):
"""This function checks the functionality of is_error_bad_ckpnt function
in markov/utils.py
<is_error_bad_ckpnt> determines whether a value error is caused by an invalid checkpoint
by looking for keywords 'tensor', 'shape', 'checksum', 'checkpoint' in the exception message
Args:
error (String): Error message to be parsed
expected (Boolean): Expected return from function
"""
assert utils.is_error_bad_ckpnt(error) == expected
@pytest.mark.robomaker
@pytest.mark.parametrize("racecar_num, racer_names", [(1, ['racecar']),
(2, ['racecar_0', 'racecar_1'])])
def test_get_racecar_names(racecar_num, racer_names):
"""This function checks the functionality of get_racecar_names function
in markov/utils.py
Args:
racecar_num (int): The number of racecars
racer_names (List): Returned list of racecar names
"""
assert utils.get_racecar_names(racecar_num) == racer_names
@pytest.mark.robomaker
@pytest.mark.parametrize("racecar_name, racecar_num", [('racecar', None),
('racecar_1', 1)])
def test_get_racecar_idx(racecar_name, racecar_num):
"""This function checks the functionality of get_racecar_idx function
in markov/utils.py
Args:
racecar_name (String): The name of racecar
racecar_num: If single car race, returns None else returns the racecar number
"""
assert utils.get_racecar_idx(racecar_name) == racecar_num
@pytest.mark.robomaker
def test_get_racecar_idx_exception():
"""This function checks the functionality of get_racecar_idx function
in markov/utils.py when exception is generated if wrong format passed
"""
# Remove any sync file generated because of other tests generating exceptions
if os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE):
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
err_message = "racecar name should be in format racecar_x. However, get"
proc = multiprocessing.Process(target=utils.get_racecar_idx,
args=('1_racecar', ))
proc.start()
proc.join()
assert os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE)
try:
with open(EXCEPTION_HANDLER_SYNC_FILE, 'r') as sync_file:
captured_log = json.loads(sync_file.read())
finally:
# Remove the sync file created due to log_and_exit
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
assert not proc.is_alive()
assert proc.exitcode == 1
assert err_message in captured_log['simapp_exception']['message']
assert captured_log['simapp_exception']['exceptionType'] == SIMAPP_SIMULATION_WORKER_EXCEPTION
assert captured_log['simapp_exception']['eventType'] == SIMAPP_EVENT_SYSTEM_ERROR
assert captured_log['simapp_exception']['errorCode'] == SIMAPP_EVENT_ERROR_CODE_500
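# Illustrative shape of the sync-file payload asserted above (the message text is
# hypothetical; the other values are the constants imported from
# markov.log_handler.constants):
# {"simapp_exception": {"message": "racecar name should be in format racecar_x. ...",
#                       "exceptionType": SIMAPP_SIMULATION_WORKER_EXCEPTION,
#                       "eventType": SIMAPP_EVENT_SYSTEM_ERROR,
#                       "errorCode": SIMAPP_EVENT_ERROR_CODE_500}}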
@pytest.mark.robomaker
def test_force_list(s3_bucket):
"""This function checks the functionality of force_list function
in markov/utils.py
Args:
s3_bucket (String): S3_BUCKET
"""
assert utils.force_list(s3_bucket) == [s3_bucket]
@pytest.mark.robomaker
def test_get_boto_config():
"""This function checks the functionality of get_boto_config function
in markov/utils.py
"""
utils.get_boto_config()
| 40.166667
| 100
| 0.71917
|
79501284d0e33713369f547ec3758bbf5fdca7c9
| 287
|
py
|
Python
|
manimlib/utils/sounds.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | 1
|
2022-03-23T06:27:22.000Z
|
2022-03-23T06:27:22.000Z
|
manimlib/utils/sounds.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | null | null | null |
manimlib/utils/sounds.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | null | null | null |
from manimlib.utils.file_ops import find_file
from manimlib.utils.directories import get_sound_dir
def get_full_sound_file_path(sound_file_name) -> str:
return find_file(
sound_file_name,
directories=[get_sound_dir()],
extensions=[".wav", ".mp3", ""]
)
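# A minimal usage sketch; "click" is a hypothetical sound name that resolves only
# if a matching .wav/.mp3 file exists under get_sound_dir().
if __name__ == "__main__":
    print(get_full_sound_file_path("click"))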
| 26.090909
| 53
| 0.703833
|
795012e6f2327d16b58307bea7e7970f1eee1eb1
| 8,462
|
py
|
Python
|
DataUtils/Embed.py
|
Wiser1990/pytorch_NER_BiLSTM_CNN_CRF
|
6dcda3558001d6c7c3b7ca63ed08718a96effe02
|
[
"Apache-2.0"
] | 1
|
2018-11-20T11:09:11.000Z
|
2018-11-20T11:09:11.000Z
|
DataUtils/Embed.py
|
Wiser1990/pytorch_NER_BiLSTM_CNN_CRF
|
6dcda3558001d6c7c3b7ca63ed08718a96effe02
|
[
"Apache-2.0"
] | null | null | null |
DataUtils/Embed.py
|
Wiser1990/pytorch_NER_BiLSTM_CNN_CRF
|
6dcda3558001d6c7c3b7ca63ed08718a96effe02
|
[
"Apache-2.0"
] | null | null | null |
# @Author : bamtercelboo
# @Datetime : 2018/8/27 15:34
# @File : Embed.py
# @Last Modify Time : 2018/8/27 15:34
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Embed.py
FUNCTION : None
"""
import os
import sys
import time
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
from DataUtils.Common import *
torch.manual_seed(seed_num)
np.random.seed(seed_num)
class Embed(object):
"""
Embed
"""
def __init__(self, path, words_dict, embed_type, pad):
self.embed_type_enum = ["zero", "avg", "uniform", "nn"]
self.path = path
self.words_dict = words_dict
self.embed_type = embed_type
self.pad = pad
# print(self.words_dict)
if not isinstance(self.words_dict, dict):
self.words_dict, self.words_list = self._list2dict(self.words_dict)
        # Default padID to None so the uniform/avg fallbacks below do not fail when no pad token is given.
        self.padID = self.words_dict[pad] if pad is not None else None
# print(self.words_dict)
self.dim, self.words_count = self._get_dim(path=self.path), len(self.words_dict)
self.exact_count, self.fuzzy_count, self.oov_count = 0, 0, 0
def get_embed(self):
"""
:return:
"""
embed_dict = None
if self.embed_type in self.embed_type_enum:
embed_dict = self._read_file(path=self.path)
else:
print("embed_type illegal, must be in {}".format(self.embed_type_enum))
exit()
# print(embed_dict)
embed = None
if self.embed_type == "nn":
embed = self._nn_embed(embed_dict=embed_dict, words_dict=self.words_dict)
elif self.embed_type == "zero":
embed = self._zeros_embed(embed_dict=embed_dict, words_dict=self.words_dict)
elif self.embed_type == "uniform":
embed = self._uniform_embed(embed_dict=embed_dict, words_dict=self.words_dict)
elif self.embed_type == "avg":
embed = self._avg_embed(embed_dict=embed_dict, words_dict=self.words_dict)
# print(embed)
self.info()
return embed
def _zeros_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by zeros for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
self.fuzzy_count += 1
else:
self.oov_count += 1
final_embed = torch.from_numpy(embeddings).float()
return final_embed
def _nn_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
embed = nn.Embedding(int(self.words_count), int(self.dim))
        init.xavier_uniform_(embed.weight.data)
embeddings = np.array(embed.weight.data)
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
self.fuzzy_count += 1
else:
self.oov_count += 1
final_embed = torch.from_numpy(embeddings).float()
return final_embed
def _uniform_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by uniform for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
inword_list = {}
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
inword_list[words_dict[word]] = 1
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
inword_list[words_dict[word]] = 1
self.fuzzy_count += 1
else:
self.oov_count += 1
uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6) # uniform
for i in range(len(words_dict)):
if i not in inword_list and i != self.padID:
embeddings[i] = uniform_col
final_embed = torch.from_numpy(embeddings).float()
return final_embed
def _avg_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by avg for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
inword_list = {}
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
inword_list[words_dict[word]] = 1
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
inword_list[words_dict[word]] = 1
self.fuzzy_count += 1
else:
self.oov_count += 1
sum_col = np.sum(embeddings, axis=0) / len(inword_list) # avg
for i in range(len(words_dict)):
if i not in inword_list and i != self.padID:
embeddings[i] = sum_col
final_embed = torch.from_numpy(embeddings).float()
return final_embed
@staticmethod
def _read_file(path):
"""
:param path: embed file path
:return:
"""
embed_dict = {}
with open(path, encoding='utf-8') as f:
lines = f.readlines()
lines = tqdm.tqdm(lines)
for line in lines:
values = line.strip().split(' ')
if len(values) == 1 or len(values) == 2 or len(values) == 3:
continue
w, v = values[0], values[1:]
embed_dict[w] = v
return embed_dict
def info(self):
"""
:return:
"""
total_count = self.exact_count + self.fuzzy_count
print("Words count {}, Embed dim {}.".format(self.words_count, self.dim))
print("Exact count {} / {}".format(self.exact_count, self.words_count))
print("Fuzzy count {} / {}".format(self.fuzzy_count, self.words_count))
print(" INV count {} / {}".format(total_count, self.words_count))
print(" OOV count {} / {}".format(self.oov_count, self.words_count))
print(" OOV radio ===> {}%".format(np.round((self.oov_count / total_count) * 100, 2)))
print(40 * "*")
@staticmethod
def _get_dim(path):
"""
:param path:
:return:
"""
embedding_dim = -1
with open(path, encoding='utf-8') as f:
for line in f:
line_split = line.strip().split(' ')
if len(line_split) == 1:
embedding_dim = line_split[0]
break
elif len(line_split) == 2:
embedding_dim = line_split[1]
break
else:
embedding_dim = len(line_split) - 1
break
return embedding_dim
@staticmethod
def _list2dict(convert_list):
"""
:param convert_list:
:return:
"""
list_dict = OrderedDict()
list_lower = []
for index, word in enumerate(convert_list):
list_lower.append(word.lower())
list_dict[word] = index
assert len(list_lower) == len(list_dict)
return list_dict, list_lower
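# A minimal usage sketch, assuming "embeddings.txt" is a hypothetical pre-trained
# vector file in whitespace-separated text format (word followed by its values).
if __name__ == "__main__":
    demo_vocab = ["<pad>", "hello", "world"]
    loader = Embed(path="embeddings.txt", words_dict=demo_vocab, embed_type="zero", pad="<pad>")
    weight = loader.get_embed()  # FloatTensor of shape (len(demo_vocab), embedding dim)
    print(weight.size())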
| 37.442478
| 118
| 0.569369
|
795014765f7a2ae1d8640d57080b025b036fcc07
| 561
|
py
|
Python
|
classes/item.py
|
chamburr/lamerpg
|
05c125590500286af3664efa82b92a2874823219
|
[
"MIT"
] | null | null | null |
classes/item.py
|
chamburr/lamerpg
|
05c125590500286af3664efa82b92a2874823219
|
[
"MIT"
] | null | null | null |
classes/item.py
|
chamburr/lamerpg
|
05c125590500286af3664efa82b92a2874823219
|
[
"MIT"
] | null | null | null |
class Item:
def __init__(self, **kwargs):
self.type = kwargs.get("type")
self._name = kwargs.get("name")
self.level = kwargs.get("level")
def add_level(self, level):
self.level += level
@property
def name(self):
if self.type == "sword":
return f"Sword of {self._name}"
else:
return f"Shield of {self._name}"
@property
def action(self):
if self.type == "sword":
return "attack"
else:
return "defense"
| 24.391304
| 45
| 0.500891
|
795015864f7353844b5b02560ee94e5f7f801d90
| 3,189
|
py
|
Python
|
wit/dep/old_examples/simple-forum-matching.py
|
bkj/what-is-this
|
49c6e4f9809623d8580433baf00e507faacb04f0
|
[
"Apache-2.0"
] | 21
|
2016-01-12T05:20:29.000Z
|
2022-01-05T18:14:40.000Z
|
wit/dep/old_examples/simple-forum-matching.py
|
bkj/what-is-this
|
49c6e4f9809623d8580433baf00e507faacb04f0
|
[
"Apache-2.0"
] | null | null | null |
wit/dep/old_examples/simple-forum-matching.py
|
bkj/what-is-this
|
49c6e4f9809623d8580433baf00e507faacb04f0
|
[
"Apache-2.0"
] | 38
|
2016-11-08T04:49:17.000Z
|
2019-01-10T04:47:52.000Z
|
# --
# Load deps
import keras
import pandas as pd
import urllib2
from hashlib import md5
from bs4 import BeautifulSoup
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=100)
# --
# Config + Init
num_features = 75 # Character
max_len = 350 # Character
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
path = '/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5'
in_store = pd.HDFStore(path,complevel=9, complib='bzip2')
df = {}
for k in ['/www.remingtonsociety.com', '/marauderairrifle.com']:
df[k] = in_store[k]
df[k]['origin'] = k
df = pd.concat(df.values())
in_store.close()
# Post cleaning
df['shash'] = df.origin.apply(lambda x: md5(x).hexdigest()[0:5])
df['hash'] = df.apply(lambda x: str(x['hash']) + '-' + x['shash'], 1)
df['id'] = df.apply(lambda x: str(x['id']) + '-' + x['shash'], 1)
df['src'] = df.obj
df['obj'] = df.src.apply(lambda x: BeautifulSoup(x).text.encode('ascii', 'ignore'))
# Subset to frequent paths, w/ more than 100 unique values
chash = df.groupby('hash').apply(lambda x: len(x.obj.unique()))
keep = list(chash[chash > 100].index)
df = df[df.hash.apply(lambda x: x in keep)]
# --
# Make all pairs
train = make_triplet_train(df, N = 600)
trn, _ = formatter.format(train, ['obj'], 'hash')
# Test set of all unique points
unq = df.copy()
del unq['id']
unq = unq.drop_duplicates()
awl, _ = formatter.format(unq, ['obj'], 'hash')
# --
# Define model
recurrent_size = 32 # How to pick?
dense_size = 5 # How to pick?
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_cosine', optimizer = 'adam')
# --
# Train model
# Shuffles while maintaining groups
N = 3
for _ in range(N):
ms = modsel(train.shape[0], N = 3)
_ = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 1,
batch_size = 3 * 250,
shuffle = False
)
preds = model.predict(awl['x'][0], verbose = True)
colors = awl['y'].argmax(1)
plt.scatter(preds[:,0], preds[:,2], c = colors)
plt.show()
# --
# Clustering results
# This is not the ideal algorithm for clustering the results,
# but it does an OK job.
#
# In this case we're losing some of the fields
#
from sklearn.cluster import DBSCAN
db = DBSCAN(eps = .1, min_samples = 50).fit(preds)
res = unq.hash.groupby(db.labels_).apply(lambda x: x.value_counts()).reset_index()
res.columns = ('cluster', 'hash', 'cnt')
res = res.sort('hash')
good_res = res[(res.cnt > 100) & (res.cluster > -1)]
good_res
missing_hashes = set(res.hash.unique()).difference(set(good_res.hash.unique()))
res[res.hash.isin(missing_hashes)]
eqv = list(good_res.groupby('cluster').hash.apply(lambda x: list(x)))
eqv = map(eval, np.unique(map(str, eqv)))
print_eqv(eqv, df, path = 'src')
| 24.914063
| 90
| 0.661336
|
795015b71d6762abf2bd019c16e078fd5a5beba4
| 11,214
|
py
|
Python
|
.venv/lib/python3.9/site-packages/pylint/checkers/refactoring/recommendation_checker.py
|
linuscyl/multimedia-computing-assignment
|
4932f531f67a7f57e132ea358bf70a2347021aa9
|
[
"Apache-2.0"
] | null | null | null |
.venv/lib/python3.9/site-packages/pylint/checkers/refactoring/recommendation_checker.py
|
linuscyl/multimedia-computing-assignment
|
4932f531f67a7f57e132ea358bf70a2347021aa9
|
[
"Apache-2.0"
] | null | null | null |
.venv/lib/python3.9/site-packages/pylint/checkers/refactoring/recommendation_checker.py
|
linuscyl/multimedia-computing-assignment
|
4932f531f67a7f57e132ea358bf70a2347021aa9
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
from typing import cast
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
class RecommendationChecker(checkers.BaseChecker):
__implements__ = (interfaces.IAstroidChecker,)
name = "refactoring"
msgs = {
"C0200": (
"Consider using enumerate instead of iterating with range and len",
"consider-using-enumerate",
"Emitted when code that iterates with range and len is "
"encountered. Such code can be simplified by using the "
"enumerate builtin.",
),
"C0201": (
"Consider iterating the dictionary directly instead of calling .keys()",
"consider-iterating-dictionary",
"Emitted when the keys of a dictionary are iterated through the .keys() "
"method. It is enough to just iterate through the dictionary itself, as "
'in "for key in dictionary".',
),
"C0206": (
"Consider iterating with .items()",
"consider-using-dict-items",
"Emitted when iterating over the keys of a dictionary and accessing the "
"value by index lookup. "
"Both the key and value can be accessed by iterating using the .items() "
"method of the dictionary instead.",
),
"C0207": (
"Use %s instead",
"use-maxsplit-arg",
"Emitted when accessing only the first or last element of str.split(). "
"The first and last element can be accessed by using "
"str.split(sep, maxsplit=1)[0] or str.rsplit(sep, maxsplit=1)[-1] "
"instead.",
),
}
@staticmethod
def _is_builtin(node, function):
inferred = utils.safe_infer(node)
if not inferred:
return False
return utils.is_builtin_object(inferred) and inferred.name == function
@utils.check_messages("consider-iterating-dictionary", "use-maxsplit-arg")
def visit_call(self, node: astroid.Call) -> None:
self._check_consider_iterating_dictionary(node)
self._check_use_maxsplit_arg(node)
def _check_consider_iterating_dictionary(self, node: astroid.Call) -> None:
if not isinstance(node.func, astroid.Attribute):
return
if node.func.attrname != "keys":
return
if not isinstance(node.parent, (astroid.For, astroid.Comprehension)):
return
inferred = utils.safe_infer(node.func)
if not isinstance(inferred, astroid.BoundMethod) or not isinstance(
inferred.bound, astroid.Dict
):
return
if isinstance(node.parent, (astroid.For, astroid.Comprehension)):
self.add_message("consider-iterating-dictionary", node=node)
def _check_use_maxsplit_arg(self, node: astroid.Call) -> None:
"""Add message when accessing first or last elements of a str.split() or str.rsplit()."""
# Check if call is split() or rsplit()
if (
isinstance(node.func, astroid.Attribute)
and node.func.attrname in ("split", "rsplit")
and isinstance(utils.safe_infer(node.func), astroid.BoundMethod)
):
try:
utils.get_argument_from_call(node, 0, "sep")
except utils.NoSuchArgumentError:
return
try:
# Ignore if maxsplit arg has been set
utils.get_argument_from_call(node, 1, "maxsplit")
return
except utils.NoSuchArgumentError:
pass
if isinstance(node.parent, astroid.Subscript):
try:
subscript_value = utils.get_subscript_const_value(node.parent).value
except utils.InferredTypeError:
return
if subscript_value in (-1, 0):
fn_name = node.func.attrname
new_fn = "rsplit" if subscript_value == -1 else "split"
new_name = (
node.func.as_string().rsplit(fn_name, maxsplit=1)[0]
+ new_fn
+ f"({node.args[0].as_string()}, maxsplit=1)[{subscript_value}]"
)
self.add_message("use-maxsplit-arg", node=node, args=(new_name,))
@utils.check_messages("consider-using-enumerate", "consider-using-dict-items")
def visit_for(self, node: astroid.For) -> None:
self._check_consider_using_enumerate(node)
self._check_consider_using_dict_items(node)
def _check_consider_using_enumerate(self, node: astroid.For) -> None:
"""Emit a convention whenever range and len are used for indexing."""
# Verify that we have a `range([start], len(...), [stop])` call and
# that the object which is iterated is used as a subscript in the
# body of the for.
# Is it a proper range call?
if not isinstance(node.iter, astroid.Call):
return
if not self._is_builtin(node.iter.func, "range"):
return
if not node.iter.args:
return
is_constant_zero = (
isinstance(node.iter.args[0], astroid.Const)
and node.iter.args[0].value == 0
)
if len(node.iter.args) == 2 and not is_constant_zero:
return
if len(node.iter.args) > 2:
return
# Is it a proper len call?
if not isinstance(node.iter.args[-1], astroid.Call):
return
second_func = node.iter.args[-1].func
if not self._is_builtin(second_func, "len"):
return
len_args = node.iter.args[-1].args
if not len_args or len(len_args) != 1:
return
iterating_object = len_args[0]
if isinstance(iterating_object, astroid.Name):
expected_subscript_val_type = astroid.Name
elif isinstance(iterating_object, astroid.Attribute):
expected_subscript_val_type = astroid.Attribute
else:
return
# If we're defining __iter__ on self, enumerate won't work
scope = node.scope()
if (
isinstance(iterating_object, astroid.Name)
and iterating_object.name == "self"
and scope.name == "__iter__"
):
return
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
for child in node.body:
for subscript in child.nodes_of_class(astroid.Subscript):
subscript = cast(astroid.Subscript, subscript)
if not isinstance(subscript.value, expected_subscript_val_type):
continue
value = subscript.slice
if not isinstance(value, astroid.Name):
continue
if subscript.value.scope() != node.scope():
# Ignore this subscript if it's not in the same
# scope. This means that in the body of the for
# loop, another scope was created, where the same
# name for the iterating object was used.
continue
if value.name == node.target.name and (
isinstance(subscript.value, astroid.Name)
and iterating_object.name == subscript.value.name
or isinstance(subscript.value, astroid.Attribute)
and iterating_object.attrname == subscript.value.attrname
):
self.add_message("consider-using-enumerate", node=node)
return
def _check_consider_using_dict_items(self, node: astroid.For) -> None:
"""Add message when accessing dict values by index lookup."""
# Verify that we have a .keys() call and
# that the object which is iterated is used as a subscript in the
# body of the for.
iterating_object_name = utils.get_iterating_dictionary_name(node)
if iterating_object_name is None:
return
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
for child in node.body:
for subscript in child.nodes_of_class(astroid.Subscript):
subscript = cast(astroid.Subscript, subscript)
if not isinstance(subscript.value, (astroid.Name, astroid.Attribute)):
continue
value = subscript.slice
if (
not isinstance(value, astroid.Name)
or value.name != node.target.name
or iterating_object_name != subscript.value.as_string()
):
continue
last_definition_lineno = value.lookup(value.name)[1][-1].lineno
if last_definition_lineno > node.lineno:
# Ignore this subscript if it has been redefined after
# the for loop. This checks for the line number using .lookup()
# to get the line number where the iterating object was last
# defined and compare that to the for loop's line number
continue
if (
isinstance(subscript.parent, astroid.Assign)
and subscript in subscript.parent.targets
or isinstance(subscript.parent, astroid.AugAssign)
and subscript == subscript.parent.target
):
# Ignore this subscript if it is the target of an assignment
# Early termination as dict index lookup is necessary
return
self.add_message("consider-using-dict-items", node=node)
return
@utils.check_messages("consider-using-dict-items")
def visit_comprehension(self, node: astroid.Comprehension) -> None:
iterating_object_name = utils.get_iterating_dictionary_name(node)
if iterating_object_name is None:
return
for child in node.parent.get_children():
for subscript in child.nodes_of_class(astroid.Subscript):
subscript = cast(astroid.Subscript, subscript)
if not isinstance(subscript.value, (astroid.Name, astroid.Attribute)):
continue
value = subscript.slice
if (
not isinstance(value, astroid.Name)
or value.name != node.target.name
or iterating_object_name != subscript.value.as_string()
):
continue
self.add_message("consider-using-dict-items", node=node)
return
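# Illustrative (hypothetical) snippets of the patterns reported by the messages above:
#
#   for i in range(len(items)):        # C0200 consider-using-enumerate
#       print(items[i])
#
#   for key in mapping.keys():         # C0201 consider-iterating-dictionary
#       print(key)
#
#   for key in mapping:                # C0206 consider-using-dict-items
#       print(mapping[key])
#
#   head = line.split(",")[0]          # C0207 use-maxsplit-arg
#   # suggested: line.split(",", maxsplit=1)[0]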
| 42.316981
| 97
| 0.580703
|
7950167e1acce229e0d62d8d9f3eb88d24b695d0
| 483
|
py
|
Python
|
farms2face/home/migrations/0003_prepack_description.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/home/migrations/0003_prepack_description.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/home/migrations/0003_prepack_description.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | 2
|
2018-06-19T12:12:08.000Z
|
2018-06-25T18:45:36.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_remove_prepack_description'),
]
operations = [
migrations.AddField(
model_name='prepack',
name='description',
field=models.CharField(blank=True, max_length=5000, null=True),
),
]
| 23
| 75
| 0.63147
|
7950179855efec5ea4b62e994ebcee3789dfd1d2
| 4,547
|
py
|
Python
|
src/poetry/console/commands/debug/resolve.py
|
yokomotod/poetry
|
4838c9fe9645c62353be569a96765c693f03f1a3
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/debug/resolve.py
|
yokomotod/poetry
|
4838c9fe9645c62353be569a96765c693f03f1a3
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/debug/resolve.py
|
yokomotod/poetry
|
4838c9fe9645c62353be569a96765c693f03f1a3
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING
from cleo.helpers import argument
from cleo.helpers import option
from cleo.io.outputs.output import Verbosity
from poetry.console.commands.init import InitCommand
if TYPE_CHECKING:
from poetry.console.commands.show import ShowCommand
class DebugResolveCommand(InitCommand):
name = "debug resolve"
description = "Debugs dependency resolution."
arguments = [
argument("package", "The packages to resolve.", optional=True, multiple=True)
]
options = [
option(
"extras",
"E",
"Extras to activate for the dependency.",
flag=False,
multiple=True,
),
option("python", None, "Python version(s) to use for resolution.", flag=False),
option("tree", None, "Display the dependency tree."),
option("install", None, "Show what would be installed for the current system."),
]
loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]
def handle(self) -> int:
from cleo.io.null_io import NullIO
from poetry.core.packages.project_package import ProjectPackage
from poetry.factory import Factory
from poetry.puzzle import Solver
from poetry.repositories.pool import Pool
from poetry.repositories.repository import Repository
from poetry.utils.env import EnvManager
packages = self.argument("package")
if not packages:
package = self.poetry.package
else:
# Using current pool for determine_requirements()
self._pool = self.poetry.pool
package = ProjectPackage(
self.poetry.package.name, self.poetry.package.version
)
# Silencing output
verbosity = self.io.output.verbosity
self.io.output.set_verbosity(Verbosity.QUIET)
requirements = self._determine_requirements(packages)
self.io.output.set_verbosity(verbosity)
for constraint in requirements:
name = constraint.pop("name")
assert isinstance(name, str)
extras = []
for extra in self.option("extras"):
if " " in extra:
extras += [e.strip() for e in extra.split(" ")]
else:
extras.append(extra)
constraint["extras"] = extras
package.add_dependency(Factory.create_dependency(name, constraint))
package.python_versions = self.option("python") or (
self.poetry.package.python_versions
)
pool = self.poetry.pool
solver = Solver(package, pool, Repository(), Repository(), self._io)
ops = solver.solve().calculate_operations()
self.line("")
self.line("Resolution results:")
self.line("")
if self.option("tree"):
show_command: ShowCommand = self.application.find("show")
show_command.init_styles(self.io)
packages = [op.package for op in ops]
repo = Repository(packages=packages)
requires = package.all_requires
for pkg in repo.packages:
for require in requires:
if pkg.name == require.name:
show_command.display_package_tree(self.io, pkg, repo)
break
return 0
table = self.table(style="compact")
table.style.set_vertical_border_chars("", " ")
rows = []
if self.option("install"):
env = EnvManager(self.poetry).get()
pool = Pool()
locked_repository = Repository()
for op in ops:
locked_repository.add_package(op.package)
pool.add_repository(locked_repository)
solver = Solver(package, pool, Repository(), Repository(), NullIO())
with solver.use_environment(env):
ops = solver.solve().calculate_operations()
for op in ops:
if self.option("install") and op.skipped:
continue
pkg = op.package
            row = [
                f"<c1>{pkg.complete_name}</c1>",
                f"<b>{pkg.version}</b>",
                "",  # placeholder so the marker can be filled in below without an IndexError
            ]
if not pkg.marker.is_any():
row[2] = str(pkg.marker)
rows.append(row)
table.set_rows(rows)
table.render()
return 0
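# Illustrative invocations of the command defined above ("requests" is only an
# example package name):
#
#   poetry debug resolve
#   poetry debug resolve requests --tree
#   poetry debug resolve requests -E socks --python 3.10
#   poetry debug resolve --install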
| 30.516779
| 88
| 0.575104
|
795019132d19ee23c6f45e79da2bff0141d0f804
| 856
|
py
|
Python
|
sample.py
|
AdityaPai2398/OpenCV-Playground
|
70969d9b60dc2e83e44c23632986b96d43eb2e95
|
[
"MIT"
] | 1
|
2018-06-25T07:28:30.000Z
|
2018-06-25T07:28:30.000Z
|
sample.py
|
AdityaPai2398/OpenCV-Playground
|
70969d9b60dc2e83e44c23632986b96d43eb2e95
|
[
"MIT"
] | null | null | null |
sample.py
|
AdityaPai2398/OpenCV-Playground
|
70969d9b60dc2e83e44c23632986b96d43eb2e95
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
#load image
img = cv2.imread('Lenna.png',-1)
cv2.imshow('This is Lenna',img)
rect =cv2.rectangle(img,(255,255),(275,275),(225,255,0),3)
circ =cv2.circle(img,(290,290),100,(205,205,255),3)
line =cv2.line(img,(345,265),(365,285),(225,255,0),3)
row,col,chan = img.shape
rot = cv2.getRotationMatrix2D((256,256),90,1)
im2 = cv2.warpAffine(rect,rot,(512,512))
cv2.imshow('This is Lenna',im2)
while True:
    key = cv2.waitKey(0)
    print(key)
    # press 'd' (keycode 100) to rotate the displayed image by another 90 degrees
    if key == 100:
        print("Hello")
        rot = cv2.getRotationMatrix2D((256, 256), 90, 1)
        im2 = cv2.warpAffine(im2, rot, (512, 512))
        cv2.imshow('This is Lenna', im2)
    # press Esc (keycode 27) to leave the loop so the window can be destroyed
    if key == 27:
        print("exit")
        break
cv2.destroyAllWindows()
| 18.608696
| 59
| 0.606308
|
795019f9487a618d88e982b89f5169f5c08d740c
| 13,418
|
py
|
Python
|
sdk/core/azure-common/tests/test_client_factory.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/core/azure-common/tests/test_client_factory.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/core/azure-common/tests/test_client_factory.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import json
import os
import tempfile
import unittest
import pytest
try:
from unittest import mock
except ImportError:
import mock
from io import open
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
# https://github.com/Azure/azure-cli/blob/4e1ff0ec626ea46d74793ad92a1b5eddc2b6e45b/src/azure-cli-core/azure/cli/core/cloud.py#L310
AZURE_PUBLIC_CLOUD.endpoints.app_insights_resource_id='https://api.applicationinsights.io'
from azure.common.client_factory import *
class TestCommon(unittest.TestCase):
@mock.patch('azure.common.client_factory.get_cli_active_cloud')
@mock.patch('azure.common.client_factory.get_azure_cli_credentials')
def test_get_client_from_cli_profile(self, get_azure_cli_credentials, get_cli_active_cloud):
class FakeClient(object):
def __init__(self, credentials, subscription_id, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.subscription_id = subscription_id
self.base_url = base_url
class FakeSubscriptionClient(object):
def __init__(self, credentials, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.base_url = base_url
class GraphRbacManagementClient(object):
def __init__(self, credentials, tenant_id, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if tenant_id is None:
raise ValueError("Parameter 'tenant_id' must not be None.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.tenant_id = tenant_id
self.base_url = base_url
class ApplicationInsightsDataClient(object):
def __init__(self, credentials, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.base_url = base_url
class KeyVaultClient(object):
def __init__(self, credentials):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
self.credentials = credentials
get_cli_active_cloud.return_value = AZURE_PUBLIC_CLOUD
get_azure_cli_credentials.return_value = 'credentials', 'subscription_id', 'tenant_id'
client = get_client_from_cli_profile(FakeClient)
get_azure_cli_credentials.assert_called_with(resource=None, with_tenant=True)
assert client.credentials == 'credentials'
assert client.subscription_id == 'subscription_id'
assert client.base_url == "https://management.azure.com/"
client = get_client_from_cli_profile(FakeSubscriptionClient)
get_azure_cli_credentials.assert_called_with(resource=None, with_tenant=True)
assert client.credentials == 'credentials'
assert client.base_url == "https://management.azure.com/"
client = get_client_from_cli_profile(GraphRbacManagementClient)
get_azure_cli_credentials.assert_called_with(resource="https://graph.windows.net/", with_tenant=True)
assert client.credentials == 'credentials'
assert client.tenant_id == 'tenant_id'
assert client.base_url == "https://graph.windows.net/"
client = get_client_from_cli_profile(ApplicationInsightsDataClient)
get_azure_cli_credentials.assert_called_with(resource="https://api.applicationinsights.io", with_tenant=True)
assert client.credentials == 'credentials'
assert client.base_url == "https://api.applicationinsights.io/v1"
client = get_client_from_cli_profile(KeyVaultClient)
get_azure_cli_credentials.assert_called_with(resource="https://vault.azure.net", with_tenant=True)
assert client.credentials == 'credentials'
@mock.patch('azure.common.client_factory.get_cli_active_cloud')
@mock.patch('azure.common.client_factory.get_azure_cli_credentials')
def test_get_client_from_cli_profile_core(self, get_azure_cli_credentials, get_cli_active_cloud):
class KeyVaultClientBase(object):
def __init__(self, vault_url, credential):
if not credential:
raise ValueError(
"credential should be an object supporting the TokenCredential protocol, "
"such as a credential from azure-identity"
)
if not vault_url:
raise ValueError("vault_url must be the URL of an Azure Key Vault")
self.credential = credential
self.vault_url = vault_url
class NewKeyVaultClient(KeyVaultClientBase):
pass
class StorageAccountHostsMixin(object):
def __init__(
self, account_url, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
try:
if not account_url.lower().startswith('http'):
account_url = "https://" + account_url
except AttributeError:
raise ValueError("Account URL must be a string.")
self.credential = credential
self.account_url = account_url
class BlobServiceClient(StorageAccountHostsMixin):
pass
get_cli_active_cloud.return_value = AZURE_PUBLIC_CLOUD
get_azure_cli_credentials.return_value = 'credential', 'subscription_id', 'tenant_id'
client = get_client_from_cli_profile(NewKeyVaultClient, vault_url="foo")
assert client.credential == 'credential'
assert client.vault_url == "foo"
client = get_client_from_cli_profile(BlobServiceClient, account_url="foo")
assert client.credential == 'credential'
assert client.account_url == "https://foo"
client = get_client_from_cli_profile(BlobServiceClient, account_url="foo", credential=None)
assert client.credential == None
assert client.account_url == "https://foo"
def test_get_client_from_auth_file(self):
configuration = {
"clientId": "a2ab11af-01aa-4759-8345-7803287dbd39",
"clientSecret": "password",
"subscriptionId": "15dbcfa8-4b93-4c9a-881c-6189d39f04d4",
"tenantId": "c81da1d8-65ca-11e7-b1d1-ecb1d756380e",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}
class FakeClient(object):
def __init__(self, credentials, subscription_id, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.subscription_id = subscription_id
self.base_url = base_url
class FakeSubscriptionClient(object):
def __init__(self, credentials, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.base_url = base_url
class GraphRbacManagementClient(object):
def __init__(self, credentials, tenant_id, base_url):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if tenant_id is None:
raise ValueError("Parameter 'tenant_id' must not be None.")
if not base_url:
base_url = 'should not be used'
self.credentials = credentials
self.tenant_id = tenant_id
self.base_url = base_url
class KeyVaultClient(object):
def __init__(self, credentials):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
self.credentials = credentials
class KeyVaultClientTrack2(object):
def __init__(self, credential):
if credential is None:
raise ValueError("Parameter 'credentials' must not be None.")
self.credential = credential
for encoding in ['utf-8', 'utf-8-sig', 'ascii']:
temp_auth_file = tempfile.NamedTemporaryFile(delete=False)
temp_auth_file.write(json.dumps(configuration).encode(encoding))
temp_auth_file.close()
client = get_client_from_auth_file(FakeClient, temp_auth_file.name)
self.assertEqual('15dbcfa8-4b93-4c9a-881c-6189d39f04d4', client.subscription_id)
self.assertEqual('https://management.azure.com/', client.base_url)
self.assertTupleEqual(client.credentials._args, (
'https://management.azure.com/',
'a2ab11af-01aa-4759-8345-7803287dbd39',
'password'
))
client = get_client_from_auth_file(FakeClient, temp_auth_file.name, subscription_id='fakesubid')
self.assertEqual('fakesubid', client.subscription_id)
self.assertEqual('https://management.azure.com/', client.base_url)
self.assertTupleEqual(client.credentials._args, (
'https://management.azure.com/',
'a2ab11af-01aa-4759-8345-7803287dbd39',
'password'
))
credentials_instance = "Fake credentials class as a string"
client = get_client_from_auth_file(FakeClient, temp_auth_file.name, credentials=credentials_instance)
self.assertEqual('15dbcfa8-4b93-4c9a-881c-6189d39f04d4', client.subscription_id)
self.assertEqual('https://management.azure.com/', client.base_url)
self.assertEqual(credentials_instance, client.credentials)
client = get_client_from_auth_file(FakeSubscriptionClient, temp_auth_file.name)
self.assertEqual('https://management.azure.com/', client.base_url)
self.assertTupleEqual(client.credentials._args, (
'https://management.azure.com/',
'a2ab11af-01aa-4759-8345-7803287dbd39',
'password'
))
client = get_client_from_auth_file(GraphRbacManagementClient, temp_auth_file.name)
assert client.base_url == 'https://graph.windows.net/'
assert client.tenant_id == "c81da1d8-65ca-11e7-b1d1-ecb1d756380e"
assert client.credentials._args == (
"https://graph.windows.net/",
'a2ab11af-01aa-4759-8345-7803287dbd39',
'password'
)
client = get_client_from_auth_file(KeyVaultClient, temp_auth_file.name)
assert client.credentials._args == (
"https://vault.azure.net",
'a2ab11af-01aa-4759-8345-7803287dbd39',
'password'
)
with pytest.raises(ValueError) as excinfo:
get_client_from_auth_file(KeyVaultClientTrack2, temp_auth_file.name)
assert "https://aka.ms/azsdk/python/identity/migration" in str(excinfo.value)
os.unlink(temp_auth_file.name)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 44.578073
| 130
| 0.615591
|
79501a392f60a8275eeff58b243e5dbcd89abbc4
| 1,541
|
py
|
Python
|
dcplib/aws/sqs_handler.py
|
HumanCellAtlas/dcplib
|
5d1bd939393640b9623b040f13db1c97d83bcabd
|
[
"MIT"
] | 3
|
2019-02-28T06:03:05.000Z
|
2019-07-01T18:29:11.000Z
|
dcplib/aws/sqs_handler.py
|
HumanCellAtlas/dcplib
|
5d1bd939393640b9623b040f13db1c97d83bcabd
|
[
"MIT"
] | 64
|
2018-07-27T05:43:46.000Z
|
2020-01-04T14:17:59.000Z
|
dcplib/aws/sqs_handler.py
|
HumanCellAtlas/dcplib
|
5d1bd939393640b9623b040f13db1c97d83bcabd
|
[
"MIT"
] | 3
|
2019-11-01T14:24:25.000Z
|
2020-09-07T18:42:17.000Z
|
import json
from . import resources, clients
class SQSHandler:
"""
A class encapsulating the behaviors associated with interacting with an SQS Queue object and raising errors when
queue-related behaviors fail.
"""
def __init__(self, queue_name=None, queue_url=None):
if queue_name:
self.queue = resources.sqs.Queue(clients.sqs.get_queue_url(QueueName=queue_name)['QueueUrl'])
elif queue_url:
self.queue = resources.sqs.Queue(queue_url)
else:
raise Exception("Expected either queue_name or queue_url to be specified")
def add_message_to_queue(self, payload, **attributes):
""" Given a payload (a dict) and any optional message attributes (also a dict), add it to the queue. """
        return self.queue.send_message(
            MessageBody=json.dumps(payload),
            MessageAttributes={k: {"StringValue": v, "DataType": "String"}
                               for k, v in attributes.items()})
def receive_messages_from_queue(self, wait_time=15, num_messages=1):
""" Returns the first (according to FIFO) element in the queue; if none, then returns None."""
return self.queue.receive_messages(MaxNumberOfMessages=num_messages,
WaitTimeSeconds=wait_time)
def delete_message_from_queue(self, receipt_handle):
""" Deletes the specified element from the queue if it exists and does nothing otherwise. """
self.queue.delete_messages(Entries=[{'Id': '12345', 'ReceiptHandle': receipt_handle}])
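# A minimal usage sketch; "my-queue" and the payload are hypothetical, and the calls
# only succeed with valid AWS credentials and an existing queue.
if __name__ == "__main__":
    handler = SQSHandler(queue_name="my-queue")
    handler.add_message_to_queue({"job": "ingest"}, source="example")
    for message in handler.receive_messages_from_queue(wait_time=5):
        print(message.body)
        handler.delete_message_from_queue(message.receipt_handle)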
| 42.805556
| 116
| 0.670344
|
79501aafeb88ecde0e880582b6709ca60908990e
| 12,199
|
py
|
Python
|
src/tests/ftest/datamover/dst_create.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 429
|
2016-09-28T20:43:20.000Z
|
2022-03-25T01:22:50.000Z
|
src/tests/ftest/datamover/dst_create.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 6,341
|
2016-11-24T12:34:26.000Z
|
2022-03-31T23:53:46.000Z
|
src/tests/ftest/datamover/dst_create.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 202
|
2016-10-30T14:47:53.000Z
|
2022-03-30T21:29:11.000Z
|
#!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join
from pydaos.raw import DaosApiError
import avocado
class DmvrDstCreate(DataMoverTestBase):
# pylint: disable=too-many-ancestors
"""Data Mover validation for destination container create logic.
Test Class Description:
Tests the following cases:
Destination container automatically created.
Destination container user attributes match source.
Destination container properties match source.
:avocado: recursive
"""
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super().setUp()
# Get the parameters
self.ior_flags = self.params.get(
"ior_flags", "/run/ior/*")
self.test_file = self.ior_cmd.test_file.value
# For dataset_gen and dataset_verify
self.obj_list = []
def run_dm_dst_create(self, tool, cont_type, api, check_props):
"""
Test Description:
Tests Data Mover destination container creation.
Use Cases:
Create pool1.
Create POSIX cont1 in pool1.
Create small dataset in cont1.
Copy cont1 to a new cont in pool1, with a supplied UUID.
Copy cont1 to a new cont in pool1, without a supplied UUID.
Create pool2.
Copy cont1 to a new cont in pool2, with a supplied UUID.
Copy cont1 to a new cont in pool2, without a supplied UUID.
            For each copy, verify container properties and user attributes.
Repeat, but with container type Unknown.
"""
# Set the tool to use
self.set_tool(tool)
# Set the api to use
self.set_api(api)
# Create 1 pool
pool1 = self.create_pool()
pool1.connect(2)
# Create a source cont
cont1 = self.create_cont(pool1, cont_type=cont_type)
# Create source data
src_props = self.write_cont(cont1)
cont2_uuid = self.gen_uuid()
self.run_datamover(
self.test_id + " cont1 to cont2 (same pool) (supplied cont)",
"DAOS", "/", pool1, cont1,
"DAOS", "/", pool1, cont2_uuid)
cont2 = self.get_cont(pool1, cont2_uuid)
cont2.type.update(cont1.type.value, "type")
self.verify_cont(cont2, api, check_props, src_props)
result = self.run_datamover(
self.test_id + " cont1 to cont3 (same pool) (empty cont)",
"DAOS", "/", pool1, cont1,
"DAOS", "/", pool1, None)
cont3_uuid = self.parse_create_cont_uuid(result.stdout_text)
cont3 = self.get_cont(pool1, cont3_uuid)
cont3.type.update(cont1.type.value, "type")
self.verify_cont(cont3, api, check_props, src_props)
# Create another pool
pool2 = self.create_pool()
pool2.connect(2)
cont4_uuid = self.gen_uuid()
self.run_datamover(
self.test_id + " cont1 to cont4 (different pool) (supplied cont)",
"DAOS", "/", pool1, cont1,
"DAOS", "/", pool2, cont4_uuid)
cont4 = self.get_cont(pool2, cont4_uuid)
cont4.type.update(cont1.type.value, "type")
self.verify_cont(cont4, api, check_props, src_props)
result = self.run_datamover(
self.test_id + " cont1 to cont5 (different pool) (empty cont)",
"DAOS", "/", pool1, cont1,
"DAOS", "/", pool2, None)
cont5_uuid = self.parse_create_cont_uuid(result.stdout_text)
cont5 = self.get_cont(pool2, cont5_uuid)
cont5.type.update(cont1.type.value, "type")
self.verify_cont(cont5, api, check_props, src_props)
# Only test POSIX paths with DFS API
if api == "DFS":
# Create a posix source path
posix_path = join(self.new_posix_test_path(), self.test_file)
self.run_ior_with_params(
"POSIX", posix_path, flags=self.ior_flags[0])
cont6_uuid = self.gen_uuid()
self.run_datamover(
self.test_id + " posix to cont6 (supplied cont)",
"POSIX", posix_path, None, None,
"DAOS", "/", pool1, cont6_uuid)
cont6 = self.get_cont(pool1, cont6_uuid)
cont6.type.update(cont1.type.value, "type")
self.verify_cont(cont6, api, False)
result = self.run_datamover(
self.test_id + " posix to cont7 (empty cont)",
"POSIX", posix_path, None, None,
"DAOS", "/", pool1, None)
cont7_uuid = self.parse_create_cont_uuid(result.stdout_text)
cont7 = self.get_cont(pool1, cont7_uuid)
cont7.type.update(cont1.type.value, "type")
self.verify_cont(cont7, api, False)
pool1.disconnect()
pool2.disconnect()
def write_cont(self, cont):
"""Write the test data using either ior or the obj API.
Args:
cont (TestContainer): the container to write to.
Returns:
list: string list of properties from daos command.
"""
if cont.type.value == "POSIX":
self.run_ior_with_params(
"DAOS", "/" + self.test_file,
cont.pool, cont, flags=self.ior_flags[0])
else:
self.obj_list = self.dataset_gen(cont, 1, 1, 1, 0, [1024], [])
# Write the user attributes
cont.open()
attrs = self.get_cont_usr_attr()
cont.container.set_attr(attrs)
cont.close()
# Return existing cont properties
return self.get_cont_prop(cont)
def verify_cont(self, cont, api, check_attr_prop=True, prop_list=None):
"""Read-verify test data using either ior or the obj API.
Args:
cont (TestContainer): the container to verify.
check_attr_prop (bool, optional): whether to verify user
                attributes and cont properties. Defaults to True.
prop_list (list, optional): list of properties from get_cont_prop.
Required when check_attr_prop is True.
"""
# It's important to check the properties first, since when ior
# mounts DFS the alloc'ed OID might be incremented.
if check_attr_prop:
cont.open()
self.verify_cont_prop(cont, prop_list, api)
self.verify_usr_attr(cont)
cont.close()
if cont.type.value == "POSIX":
# Verify POSIX containers copied with the DFS and Object APIs
self.run_ior_with_params(
"DAOS", "/" + self.test_file,
cont.pool, cont, flags=self.ior_flags[1])
else:
# Verify non-POSIX containers copied with the Object API
self.dataset_verify(self.obj_list, cont, 1, 1, 1, 0, [1024], [])
def get_cont_prop(self, cont):
"""Get all container properties with daos command.
Args:
cont (TestContainer): the container to get props of.
Returns:
list: string list of properties and values from daos command.
"""
prop_result = self.daos_cmd.container_get_prop(
cont.pool.uuid, cont.uuid)
prop_text = prop_result.stdout_text
prop_list = prop_text.split('\n')[1:]
return prop_list
def verify_cont_prop(self, cont, prop_list, api):
"""Verify container properties against an input list.
Expects the container to be open.
Args:
cont (TestContainer): the container to verify.
prop_list (list): list of properties from get_cont_prop.
"""
actual_list = self.get_cont_prop(cont)
# Make sure sizes match
if len(prop_list) != len(actual_list):
self.log.info("Expected\n%s\nbut got\n%s\n",
prop_list, actual_list)
self.fail("Container property verification failed.")
# Make sure each property matches
for prop_idx, prop in enumerate(prop_list):
# This one is not set
if api == "DFS" and "OID" in prop_list[prop_idx]:
continue
if prop != actual_list[prop_idx]:
self.log.info("Expected\n%s\nbut got\n%s\n",
prop_list, actual_list)
self.fail("Container property verification failed.")
self.log.info("Verified %d container properties:\n%s",
len(actual_list), actual_list)
@staticmethod
def get_cont_usr_attr():
"""Generate some user attributes"""
attrs = {}
attrs["attr1".encode("utf-8")] = "value 1".encode("utf-8")
attrs["attr2".encode("utf-8")] = "value 2".encode("utf-8")
return attrs
def verify_usr_attr(self, cont):
"""Verify user attributes. Expects the container to be open.
Args:
cont (TestContainer): the container to verify.
"""
attrs = self.get_cont_usr_attr()
actual_attrs = cont.container.get_attr(list(attrs.keys()))
# Make sure the sizes match
if len(attrs.keys()) != len(actual_attrs.keys()):
self.log.info("Expected\n%s\nbut got\n%s\n",
attrs.items(), actual_attrs.items())
self.fail("Container user attributes verification failed.")
# Make sure each attr matches
for attr, val in attrs.items():
if attr not in actual_attrs:
self.log.info("Expected\n%s\nbut got\n%s\n",
attrs.items(), actual_attrs.items())
self.fail("Container user attributes verification failed.")
if val != actual_attrs[attr]:
self.log.info("Expected\n%s\nbut got\n%s\n",
attrs.items(), actual_attrs.items())
self.fail("Container user attributes verification failed.")
self.log.info("Verified %d user attributes:\n%s",
len(attrs.keys()), attrs.items())
@avocado.fail_on(DaosApiError)
def test_dm_dst_create_dcp_posix_dfs(self):
"""
Test Description:
Verifies destination container creation
for DFS API, including
container properties and user attributes.
:avocado: tags=all,full_regression
:avocado: tags=datamover,dcp
:avocado: tags=dm_dst_create,dm_dst_create_dcp_posix_dfs
"""
self.run_dm_dst_create("DCP", "POSIX", "DFS", True)
@avocado.fail_on(DaosApiError)
def test_dm_dst_create_dcp_posix_daos(self):
"""
Test Description:
Verifies destination container creation
for POSIX containers using OBJ API, including
container properties and user attributes.
:avocado: tags=all,full_regression
:avocado: tags=datamover,dcp
:avocado: tags=dm_dst_create,dm_dst_create_dcp_posix_daos
"""
self.run_dm_dst_create("DCP", "POSIX", "DAOS", True)
@avocado.fail_on(DaosApiError)
def test_dm_dst_create_dcp_unknown_daos(self):
"""
Test Description:
Verifies destination container creation
when API is unknown, including
container properties and user attributes.
:avocado: tags=all,full_regression
:avocado: tags=datamover,dcp
:avocado: tags=dm_dst_create,dm_dst_create_dcp_unknown_daos
"""
self.run_dm_dst_create("DCP", None, "DAOS", True)
@avocado.fail_on(DaosApiError)
def test_dm_dst_create_fs_copy_posix_dfs(self):
"""
Test Description:
Verifies destination container creation
            for POSIX containers using FS_COPY with the DFS API, including
container properties and user attributes.
:avocado: tags=all,full_regression
:avocado: tags=datamover,fs_copy
:avocado: tags=dm_dst_create,dm_dst_create_fs_copy_posix_dfs
"""
self.run_dm_dst_create("FS_COPY", "POSIX", "DFS", False)
| 36.966667
| 78
| 0.596934
|
79501c57ac300f7d4a0952562fb48b17595a8309
| 695
|
py
|
Python
|
fable/fable_sources/fable/test/tst_show_calls.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 4
|
2016-09-30T15:03:39.000Z
|
2021-03-25T13:27:08.000Z
|
fable/fable_sources/fable/test/tst_show_calls.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 1
|
2018-04-18T14:41:18.000Z
|
2018-04-20T19:33:52.000Z
|
fable/fable_sources/fable/test/tst_show_calls.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 3
|
2016-04-19T18:20:30.000Z
|
2019-04-03T14:54:29.000Z
|
from __future__ import division
def run(args):
assert len(args) == 0
import libtbx.load_env
from cStringIO import StringIO
import os
op = os.path
t_dir = libtbx.env.under_dist(
module_name="fable", path="test/valid", test=op.isdir)
excluded_file_names = set("""\
blockdata_unnamed.f
""".splitlines())
from fable.command_line import show_calls
for file_name in os.listdir(t_dir):
if (not file_name.endswith(".f")): continue
if (file_name in excluded_file_names): continue
sys.stdout = StringIO()
show_calls.run(args=[op.join(t_dir, file_name)])
sys.stdout = sys.__stdout__
print "OK"
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
| 27.8
| 58
| 0.705036
|
79501c685578b8dfe069cef093080e87a0500853
| 971
|
py
|
Python
|
example/tutorials/code/rosetta_naive_protocol.py
|
00make/Rosetta-zh
|
75f4f59956bd24bbf16637d94bede8b65c9db017
|
[
"Apache-2.0"
] | null | null | null |
example/tutorials/code/rosetta_naive_protocol.py
|
00make/Rosetta-zh
|
75f4f59956bd24bbf16637d94bede8b65c9db017
|
[
"Apache-2.0"
] | null | null | null |
example/tutorials/code/rosetta_naive_protocol.py
|
00make/Rosetta-zh
|
75f4f59956bd24bbf16637d94bede8b65c9db017
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Import rosetta package
import latticex.rosetta as rtt
import tensorflow as tf
# Attention!
# This is just for presentation of integrating a new protocol.
# NEVER USE THIS PROTOCOL IN PRODUCTION ENVIRONMENT!
rtt.activate("Naive")
# Get private data from P0 and P1
matrix_a = tf.Variable(rtt.private_console_input(0, shape=(3, 2)))
matrix_b = tf.Variable(rtt.private_console_input(1, shape=(3, 2)))
# Just use the native tf.multiply operation.
cipher_result = tf.multiply(matrix_a, matrix_b)
# Start execution
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Take a glance at the ciphertext
cipher_a = sess.run(matrix_a)
print('local shared matrix a:\n', cipher_a)
cipher_result_v = sess.run(cipher_result)
print('local ciphertext result:\n', cipher_result_v)
# Get the result of Rosetta multiply
print('plaintext result:\n', sess.run(rtt.SecureReveal(cipher_result)))
rtt.deactivate()
| 31.322581
| 75
| 0.746653
|
79501c75986e25ebca8b9c4edc71b4fd9256b1f5
| 4,332
|
py
|
Python
|
src/eval.py
|
Artcs1/piou2
|
6ffda363b97969ea8e1c933a90d46720196a42b1
|
[
"MIT"
] | 177
|
2020-07-22T08:06:11.000Z
|
2022-03-04T03:24:13.000Z
|
src/eval.py
|
gds101054108/piou
|
98f8d068a903d295f990609d8f90e4136e836495
|
[
"MIT"
] | 18
|
2020-07-30T14:05:09.000Z
|
2021-12-15T12:21:13.000Z
|
src/eval.py
|
gds101054108/piou
|
98f8d068a903d295f990609d8f90e4136e836495
|
[
"MIT"
] | 47
|
2020-07-21T01:38:55.000Z
|
2022-03-04T03:24:15.000Z
|
import sys
from pycocotools.coco import COCO
import os
import cv2
import numpy as np
import mmcv
import codecs
import pandas as pd
import glob
CENTERNET_PATH = '/datadrive/sangliang/CenterNet/src/lib/'
sys.path.insert(0, CENTERNET_PATH)
from detectors.detector_factory import detector_factory
from opts import opts
thres=0.3
MODEL_PATH = '/datadrive/sangliang/CenterNet/exp/ctdet/coco_dla/model_best.pth'
TASK = 'ctdet' # or 'multi_pose' for human pose estimation
opt = opts().init('{} --load_model {} --arch dla6channels_34 --dataset BottleTracking'.format(TASK, MODEL_PATH).split(' '))
detector = detector_factory[opt.task](opt)
ann_file='/datadrive/sangliang/CenterNet/data/BottleTracking/annotations/val.json'
#ann_file='/datadrive/sangliang/CenterNet/data/BottleTracking/tongxin_eval_dataset/annotations/eval.json'
coco = COCO(ann_file)
cat_ids = coco.getCatIds()
cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(cat_ids)
}
img_ids = coco.getImgIds()
img_infos = []
output_folder = '/datadrive/sangliang/CenterNet/data/BottleTracking/eval'
mmcv.mkdir_or_exist(output_folder)
predict_bboxes = []
gt_bboxes = []
outfile = codecs.open('/datadrive/sangliang/CenterNet/data/BottleTracking/thres_{}_result.csv'.format(thres), 'w', encoding='utf-8')
outfile.write('ImgUrl,xmin,ymin,xmax,ymax,prob'+'\n')
csv_path_list = glob.glob(os.path.join('/datadrive/sangliang/CenterNet/data/BottleTracking/task_csvs', '*.csv'))
df = pd.DataFrame()
for csv_path in csv_path_list:
df=df.append(pd.read_csv(csv_path, index_col=False, encoding='utf-8'))
df=df.loc[df.ImageQuality=='["qualified"]']
# gt_df = pd.read_csv('/datadrive/sangliang/CenterNet/data/BottleTracking/tongxin_eval_dataset/gt_name.csv')['image_name'].tolist()
for i in img_ids:
info = coco.loadImgs([i])[0]
img_path = os.path.join('/datadrive/sangliang/CenterNet/data/BottleTracking/images',
info['file_name'])
# if info['file_name'] not in gt_df:
# print(info['file_name'])
# continue
tmp_img = cv2.imread(img_path)
left_img = tmp_img[:, :tmp_img.shape[1] // 2, :]
right_img = tmp_img[:, tmp_img.shape[1] // 2:, :]
img = np.concatenate((left_img, right_img), axis=2)
ret = detector.run(img)['results']
for bbox in ret[1]:
if bbox[4] > thres:
box = np.array(bbox[0:4], dtype=np.int32)
txt = '{}{:.5f}'.format('unit', bbox[4])
font = cv2.FONT_HERSHEY_SIMPLEX
c=(0, 0, 255)
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(
tmp_img, (box[0], box[1]), (box[2], box[3]), c, 2)
outfile.write(info['flickr_url']+','+str(box[0])+','+str(box[1])+','+str(box[2])+','+str(box[3])+','+str(bbox[4])+'\n')
cv2.rectangle(tmp_img,
(box[0], box[1] - cat_size[1] - 2),
(box[0] + cat_size[0], box[1] - 2), c, -1)
cv2.putText(tmp_img, txt, (box[0], box[1] - 2),
font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
ann_ids = coco.getAnnIds(imgIds=[i])
anns = coco.loadAnns(ids=ann_ids)
# for k in range(len(anns)):
# ann = anns[k]
# box=ann['bbox']
# cv2.rectangle(
# tmp_img, (int(box[0]), int(box[1])), (int(box[2]+box[0]), int(box[3]+box[1])), (255,255,0), 2)
# outfile.write(info['flickr_url'] + ',' + str(
# box[0]) + ',' + str(box[1]) + ',' + str(box[2]+box[0]) + ',' + str(box[3]+box[1]) + ',' + str(2.0)+'\n')
url_df = df.loc[df['ImgUrl'] == info['flickr_url']]
for index, row in url_df.iterrows():
ProductId = row['ProductId']
if ProductId == 1047936:
outfile.write(info['flickr_url'] + ',' + str(
row['xmin']*tmp_img.shape[1]) + ',' + str(row['ymin']*tmp_img.shape[0]) + ',' + str((row['xmax'])*tmp_img.shape[1]) + ',' + str((row['ymax'])*tmp_img.shape[0]) + ',' + str(1047636.0)+'\n')
else:
outfile.write(info['flickr_url'] + ',' + str(
row['xmin']*tmp_img.shape[1]) + ',' + str(row['ymin']*tmp_img.shape[0]) + ',' + str((row['xmax'])*tmp_img.shape[1]) + ',' + str((row['ymax'])*tmp_img.shape[0]) + ',' + str(2.0)+'\n')
cv2.rectangle(
tmp_img, (int(row['xmin']*tmp_img.shape[1]), int(row['ymin']*tmp_img.shape[0])), (int((row['xmax'])*tmp_img.shape[1]), int((row['ymax'])*tmp_img.shape[0])), (255,255,0), 2)
cv2.imwrite(os.path.join(output_folder,info['file_name']), tmp_img)
| 44.659794
| 196
| 0.638273
|
79501c81e535cd09aa50da6e5e608833a5696a6b
| 4,342
|
py
|
Python
|
get.py
|
yanxyz/cssgaga-changelog
|
88fa780743e56f3bf131631992f2f550ce157ee9
|
[
"MIT"
] | null | null | null |
get.py
|
yanxyz/cssgaga-changelog
|
88fa780743e56f3bf131631992f2f550ce157ee9
|
[
"MIT"
] | null | null | null |
get.py
|
yanxyz/cssgaga-changelog
|
88fa780743e56f3bf131631992f2f550ce157ee9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Ivan Yan 2013-12-20
# Fetch and merge the CssGaGa changelog
#
# 2014-04-10
# Switched to Python 3.4
#
from __future__ import unicode_literals
import os
import sys
import subprocess
import re
import codecs
import json
import argparse
changelog = 'CHANGELOG.txt'  # the merged changelog
currenttxt = 'current.txt'   # the svn log file
logdir = './log'             # directory where per-revision logs are stored
if not os.path.exists(logdir):
os.mkdir(logdir)
logjson = os.path.join(logdir, 'log.json')
def sav_config():
open(logjson, 'w').write(json.dumps(log))
if not os.path.exists(logjson):
log = {
        'r0': 0,  # remote revision
        'r': 0,   # fetched revision
        'n': 0,   # processed revision
        'a': []   # revisions that contain current.txt
}
sav_config()
else:
log = json.load(open(logjson, 'r'))
# Fetch the version number
# The version lives in current.txt, so only that file is checked out
def check():
    '''Check the remote revision.
    Note: checking for updates does not fetch them automatically.
    '''
output = subprocess.check_output(["svn", "up", currenttxt]).decode("latin")
print(output)
m = re.search(r'\d+', output)
remote_r = int(m.group(0))
tip = True
if remote_r == log['r0']:
if log['r'] == log['r0']:
tip = False
print('已经是最新的')
else:
log['r0'] = remote_r
sav_config()
print('有更新')
if tip:
print('使用参数 -u 来获取更新')
def up(num):
    '''Fetch the specified revision.
    If the update touches current.txt, svn reports "Updated to revision xxx.".
    Some revisions do not (e.g. r1 only initialized the directory); skip those.
    https://code.google.com/p/cssgaga/source/list?num=25&start=6
    '''
str_num = str(num)
print('revision ' + str_num + " ..." )
output = subprocess.check_output(["svn", "up", "-r" + str_num, currenttxt]).decode("latin")
search = 'Updated to revision ' + str_num
if search in output:
logfile = os.path.join(logdir, str_num + '.txt')
        # Delete the file if it already exists so the script can be re-run
if os.path.exists(logfile):
os.remove(logfile)
os.rename(currenttxt, logfile)
if not num in log['a']:
log['a'].append(num)
print('done')
else:
print('skip')
def upall():
    '''Fetch all revisions'''
if log['r'] < log['r0']:
print('开始获取日志...')
for i in range(log['r'] + 1, log['r0'] + 1):
up(i)
log['r'] = log['r0']
sav_config()
print('已获取全部日志')
else:
if log['r0'] == 0:
print('使用参数 -c 检查更新')
else:
print('已经是最新的')
# Extract versions from the log files in order.
# Use a map: key is the version number, value is that version's change notes.
# When the same version appears in several log files, keep the later one.
def split_log(logfile, d = {}):
# s = open(logfile, 'r').read()
    # Assumes the file is encoded as either GBK or UTF-8; this may fail.
try:
s = open(logfile, encoding='gbk').read()
except UnicodeDecodeError:
s = open(logfile, encoding='utf-8-sig').read()
p = re.compile(r'^[ \t]*(\d+)[ \t]*\n', re.M)
vers = p.findall(s)
contents = p.split(s)
for v in vers:
j = contents.index(v)
k = int(v)
d[k] = contents[j+1].strip()
return d
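# --- Hedged illustration (not part of the original script) ---
# A minimal sketch of what the version-number pattern used by split_log() does:
# it interleaves version headers with their change notes, and later entries win
# when a version shows up again. The sample text and the helper name
# _split_log_example are made up for illustration only.
def _split_log_example():
    import re
    sample = "101\nfix a\n\n102\nfix b\n"
    p = re.compile(r'^[ \t]*(\d+)[ \t]*\n', re.M)
    vers = p.findall(sample)        # ['101', '102']
    contents = p.split(sample)      # ['', '101', 'fix a\n\n', '102', 'fix b\n']
    return {int(v): contents[contents.index(v) + 1].strip() for v in vers}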
def save(d):
content = ''
keys = sorted(d)
for k in keys:
content = str(k) + '\n' + d[k] + '\n\n' + content
open(changelog, mode='w', encoding='utf-8').write(content)
def merge_all():
    '''Re-merge all local logs'''
print('开始合并...')
d = {}
for i in range(len(log['a'])):
logfile = os.path.join(logdir, str(log['a'][i]) + '.txt')
d = split_log(logfile, d)
save(d)
print('合并完成')
def merge_up():
    '''Incremental merge'''
if log['n'] < log['r']:
print('开始合并...')
d = split_log(changelog) if os.path.exists(changelog) else {}
for i in range(log['n'] + 1, log['r'] +1):
if i in log['a']:
logfile = os.path.join(logdir, str(i) + '.txt')
d = split_log(logfile, d)
save(d)
log['n'] = log['r']
sav_config()
print('合并完成')
else:
if log['r'] == 0:
print('使用参数 -c 检查更新')
else:
print('没有新内容供合并')
def test():
# content = open('log/119.txt', 'r').read()
content = open('log/5.txt', 'r').read()
    # Handle the UTF-8 BOM
# http://stackoverflow.com/questions/17912307/u-ufeff-in-python-string
if content.startswith(codecs.BOM_UTF8):
content = content.decode('utf-8-sig')
p = re.compile(r'^[ \t]*(\d+)[ \t]*$', re.M)
vers = p.findall(content)
print(vers)
contents = p.split(content)
print(contents)
def test_save():
log['a'] = [116, 117, 118, 119]
merge_all()
if __name__ == '__main__':
# test()
parser = argparse.ArgumentParser(prog='getlog', description='获取并整合 CssGaga 更新日志')
parser.add_argument('-c', action='store_true', help="检查更新")
parser.add_argument('-u', action='store_true', help="获取检查到的更新")
parser.add_argument('-a', action='store_true', help="重新合并本地所有更新日志")
args = parser.parse_args()
    # Only one option is supported at a time
if args.c:
check()
elif args.u:
upall()
merge_up()
elif args.a:
merge_all()
else:
parser.parse_args('-h'.split())
| 20.77512
| 92
| 0.622754
|
79501de39858f86defcb00475e8982760d1b52f5
| 8,370
|
py
|
Python
|
dexp/datasets/operations/deconv.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/datasets/operations/deconv.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/datasets/operations/deconv.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
import functools
from pathlib import Path
from typing import List, Optional, Sequence, Tuple
import dask
import numpy
import scipy
from arbol.arbol import aprint, asection
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from dexp.datasets import BaseDataset
from dexp.optics.psf.standard_psfs import nikon16x08na, olympus20x10na
from dexp.processing.deconvolution import (
admm_deconvolution,
lucy_richardson_deconvolution,
)
from dexp.processing.filters.fft_convolve import fft_convolve
from dexp.processing.utils.scatter_gather_i2i import scatter_gather_i2i
from dexp.utils.backends import Backend, BestBackend
from dexp.utils.slicing import slice_from_shape
def dataset_deconv(
dataset: BaseDataset,
dest_path: str,
channels: Sequence[str],
slicing,
store: str = "dir",
compression: str = "zstd",
compression_level: int = 3,
overwrite: bool = False,
tilesize: Optional[Tuple[int]] = None,
method: str = "lr",
num_iterations: int = 16,
max_correction: int = 16,
power: float = 1,
blind_spot: int = 0,
back_projection: Optional[str] = None,
wb_order: int = 5,
psf_objective: str = "nikon16x08na",
psf_na: float = 0.8,
psf_dxy: float = 0.485,
psf_dz: float = 2,
psf_xy_size: int = 17,
psf_z_size: int = 17,
psf_show: bool = False,
scaling: Optional[Tuple[float]] = None,
workers: int = 1,
workersbackend: str = "",
devices: Optional[List[int]] = None,
check: bool = True,
stop_at_exception: bool = True,
):
from dexp.datasets import ZDataset
mode = "w" + ("" if overwrite else "-")
dest_dataset = ZDataset(dest_path, mode, store, parent=dataset)
# Default tile size:
if tilesize is None:
tilesize = 320 # very conservative
# Scaling default value:
if scaling is None:
scaling = (1, 1, 1)
sz, sy, sx = scaling
aprint(f"Input images will be scaled by: (sz,sy,sx)={scaling}")
# CUDA DASK cluster
cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES=devices)
client = Client(cluster)
aprint("Dask Client", client)
lazy_computation = []
for channel in dataset._selected_channels(channels):
array = dataset.get_array(channel)
aprint(f"Slicing with: {slicing}")
out_shape, volume_slicing, time_points = slice_from_shape(array.shape, slicing)
out_shape = tuple(int(round(u * v)) for u, v in zip(out_shape, (1,) + scaling))
dtype = numpy.float16 if method == "admm" else array.dtype
# Adds destination array channel to dataset
dest_array = dest_dataset.add_channel(
name=channel, shape=out_shape, dtype=dtype, codec=compression, clevel=compression_level
)
# This is not ideal but difficult to avoid right now:
sxy = (sx + sy) / 2
        # PSF parameters:
psf_kwargs = {
"dxy": psf_dxy / sxy,
"dz": psf_dz / sz,
"xy_size": int(round(psf_xy_size * sxy)),
"z_size": int(round(psf_z_size * sz)),
}
aprint(f"psf_kwargs: {psf_kwargs}")
# NA override:
if psf_na is not None:
aprint(f"Numerical aperture overridden to a value of: {psf_na}")
psf_kwargs["NA"] = psf_na
# choose psf from detection optics:
if psf_objective == "nikon16x08na":
psf_kernel = nikon16x08na(**psf_kwargs)
elif psf_objective == "olympus20x10na":
psf_kernel = olympus20x10na(**psf_kwargs)
elif Path(psf_objective).exists():
psf_kernel = numpy.load(psf_objective)
if sz != 1.0 or sy != 1.0 or sx != 1.0:
psf_kernel = scipy.ndimage.interpolation.zoom(psf_kernel, zoom=(sz, sy, sx), order=1)
psf_z_size = psf_kernel.shape[0] + 10
psf_xy_size = max(psf_kernel.shape[1:]) + 10
else:
raise RuntimeError(f"Object/path {psf_objective} not found.")
        # useful for debugging:
if psf_show:
import napari
viewer = napari.Viewer(title="DEXP | viewing PSF with napari", ndisplay=3)
viewer.add_image(psf_kernel)
napari.run()
margins = max(psf_xy_size, psf_z_size)
if method == "lr":
normalize = False
convolve = functools.partial(fft_convolve, in_place=False, mode="reflect", internal_dtype=numpy.float32)
def deconv(image):
min_value = image.min()
max_value = image.max()
return lucy_richardson_deconvolution(
image=image,
psf=psf_kernel,
num_iterations=num_iterations,
max_correction=max_correction,
normalise_minmax=(min_value, max_value),
power=power,
blind_spot=blind_spot,
blind_spot_mode="median+uniform",
blind_spot_axis_exclusion=(0,),
wb_order=wb_order,
back_projection=back_projection,
convolve_method=convolve,
)
elif method == "admm":
normalize = True
def deconv(image):
out = admm_deconvolution(
image,
psf=psf_kernel,
iterations=num_iterations,
derivative=2,
)
return out
else:
raise ValueError(f"Unknown deconvolution mode: {method}")
@dask.delayed
def process(i):
tp = time_points[i]
try:
with asection(f"Deconvolving time point for time point {i}/{len(time_points)}"):
with asection(f"Loading channel: {channel}"):
tp_array = numpy.asarray(array[tp][volume_slicing])
with BestBackend(exclusive=True, enable_unified_memory=True):
if sz != 1.0 or sy != 1.0 or sx != 1.0:
with asection(f"Applying scaling {(sz, sy, sx)} to image."):
sp = Backend.get_sp_module()
tp_array = Backend.to_backend(tp_array)
tp_array = sp.ndimage.interpolation.zoom(tp_array, zoom=(sz, sy, sx), order=1)
tp_array = Backend.to_numpy(tp_array)
with asection(
f"Deconvolving image of shape: {tp_array.shape}, with tile size: {tilesize}, "
+ "margins: {margins} "
):
aprint(f"Number of iterations: {num_iterations}, back_projection:{back_projection}, ")
tp_array = scatter_gather_i2i(
deconv,
tp_array,
tiles=tilesize,
margins=margins,
normalise=normalize,
internal_dtype=dtype,
)
with asection("Moving array from backend to numpy."):
tp_array = Backend.to_numpy(tp_array, dtype=dest_array.dtype, force_copy=False)
with asection(
f"Saving deconvolved stack for time point {i}, shape:{tp_array.shape}, dtype:{array.dtype}"
):
dest_dataset.write_stack(channel=channel, time_point=i, stack_array=tp_array)
aprint(f"Done processing time point: {i}/{len(time_points)} .")
except Exception as error:
aprint(error)
aprint(f"Error occurred while processing time point {i} !")
import traceback
traceback.print_exc()
if stop_at_exception:
raise error
for i in range(len(time_points)):
lazy_computation.append(process(i))
dask.compute(*lazy_computation)
# Dataset info:
aprint(dest_dataset.info())
# Check dataset integrity:
if check:
dest_dataset.check_integrity()
# close destination dataset:
dest_dataset.close()
client.close()
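# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of the PSF parameter scaling performed above: when the input
# is zoomed by (sz, sy, sx), the lateral pixel pitch is divided by the mean
# lateral factor and the axial step by sz, while the kernel extents grow by the
# same factors. The default numbers and the helper name are illustrative only.
def _example_psf_scaling(psf_dxy=0.485, psf_dz=2.0, psf_xy_size=17, psf_z_size=17,
                         scaling=(2.0, 0.5, 0.5)):
    sz, sy, sx = scaling
    sxy = (sx + sy) / 2
    return {
        "dxy": psf_dxy / sxy,                  # pixel pitch of the scaled image
        "dz": psf_dz / sz,                     # axial step of the scaled image
        "xy_size": int(round(psf_xy_size * sxy)),
        "z_size": int(round(psf_z_size * sz)),
    }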
| 35.168067
| 116
| 0.564636
|
79501f2744b77038ab7dd23922e0095416d78200
| 707
|
py
|
Python
|
mcloud/test_utils.py
|
modera-manyrepo-packages/mcloud
|
8ce3b1cc7bac01682a41c7b9d8d82f13a853d223
|
[
"Apache-2.0"
] | null | null | null |
mcloud/test_utils.py
|
modera-manyrepo-packages/mcloud
|
8ce3b1cc7bac01682a41c7b9d8d82f13a853d223
|
[
"Apache-2.0"
] | null | null | null |
mcloud/test_utils.py
|
modera-manyrepo-packages/mcloud
|
8ce3b1cc7bac01682a41c7b9d8d82f13a853d223
|
[
"Apache-2.0"
] | null | null | null |
from decorator import contextmanager
from flexmock import flexmock
import inject
from mcloud.txdocker import IDockerClient, DockerTwistedClient
def fake_inject(services):
def configurator(binder):
for key, item in services.items():
binder.bind(key, item)
inject.clear_and_configure(configurator)
@contextmanager
def real_docker():
def configurator(binder):
binder.bind_to_constructor(IDockerClient, lambda: DockerTwistedClient())
inject.clear_and_configure(configurator)
yield
@contextmanager
def mock_docker():
mock = flexmock(DockerTwistedClient())
inject.clear_and_configure(lambda binder: binder.bind(IDockerClient, mock))
yield mock
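# --- Hedged illustration (not part of the original module) ---
# A minimal usage sketch, assuming the code under test resolves IDockerClient
# through `inject`: inside the context manager every lookup returns the
# flexmock-wrapped client, on which expectations could be stubbed with
# flexmock's should_receive(). The helper name is made up for illustration.
def _example_mocked_docker_lookup():
    with mock_docker() as docker:
        assert inject.instance(IDockerClient) is docker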
| 23.566667
| 80
| 0.756719
|
7950202eee4765b131bf2c702b5844f9b64b3b30
| 10,302
|
py
|
Python
|
pims/formats/utils/reader.py
|
hurondp/pims
|
7d08bc4246375255611011d465db9de89fdc6ce9
|
[
"Apache-2.0"
] | null | null | null |
pims/formats/utils/reader.py
|
hurondp/pims
|
7d08bc4246375255611011d465db9de89fdc6ce9
|
[
"Apache-2.0"
] | 2
|
2022-01-05T10:39:54.000Z
|
2022-01-06T12:54:04.000Z
|
pims/formats/utils/reader.py
|
hurondp/pims
|
7d08bc4246375255611011d465db9de89fdc6ce9
|
[
"Apache-2.0"
] | 1
|
2022-02-14T18:28:31.000Z
|
2022-02-14T18:28:31.000Z
|
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Optional, TYPE_CHECKING, Tuple, Union
import numpy as np
from pims.processing.adapters import RawImagePixels
from pims.processing.region import Region, Tile
from pims.utils.iterables import ensure_list
if TYPE_CHECKING:
from pims.formats import AbstractFormat
class AbstractReader(ABC):
"""
Base reader. All format readers must extend this class.
"""
def __init__(self, format: AbstractFormat):
self.format = format
@abstractmethod
def read_thumb(
self, out_width: int, out_height: int, precomputed: bool = None,
c: Optional[Union[int, List[int]]] = None, z: Optional[int] = None, t: Optional[int] = None
) -> RawImagePixels:
"""
Get an image thumbnail whose dimensions are the nearest possible to
asked output dimensions.
Output dimensions are best-effort, that is, depending on the format
and the underlying library used to extract pixels from the image format,
it may or may not be possible to return a thumbnail at the asked output
dimensions. The implementation SHOULD try to return the nearest possible
thumbnail using format capabilities (such as shrink on load features)
but MUST NOT perform any resize operation after read (in that case, an
optimized resize operator is used in post-processing). In all cases:
* `true_out_width >= out_width`
* `true_out_height >= out_height`
Returned channels are best-effort, that is, depending on the format
and the underlying library used to extract pixels from the image format,
it may or may not be possible to return only the asked channels.
Parameters
----------
out_width
The asked output width (best-effort)
out_height
The asked output height (best-effort)
precomputed
Whether use precomputed thumbnail stored in the file if available.
Retrieving precomputed thumbnail should be faster than computing
the thumbnail from scratch (for multi-giga pixels images), but there
is no guarantee the precomputed thumb has the same quality.
c
The asked channel index(es).
If not set, all channels are considered.
z
The asked z-slice index. Image formats without Z-stack support
can safely ignore this parameter. Behavior is undetermined if `z`
is not set for an image format with Z-stack support.
t
The asked timepoint index. Image formats without time support
can safely ignore this parameter. Behavior is undetermined if `t`
is not set for an image format with time support.
Returns
-------
RawImagePixels
"""
raise NotImplementedError()
@abstractmethod
def read_window(
self, region: Region, out_width: int, out_height: int,
c: Optional[Union[int, List[int]]] = None, z: Optional[int] = None, t: Optional[int] = None
) -> RawImagePixels:
"""
Get an image window whose output dimensions are the nearest possible to
asked output dimensions.
Output dimensions are best-effort, that is, depending on the format,
the image pyramid characteristics, and the underlying library used to
extract pixels from the image format, it may or may not be possible to
return a window at the asked output dimensions. The implementation
SHOULD try to return the nearest possible window using format
capabilities (such as shrink on load features) but MUST NOT perform any
resize operation after read (in that case, an optimized resize operator
is used in post-processing). In all cases:
* `true_out_width >= out_width`
* `true_out_height >= out_height`
The implementation is responsible to find the most appropriate pyramid
tier to get the given region at asked output dimensions.
Returned channels are best-effort, that is, depending on the format
and the underlying library used to extract pixels from the image format,
it may or may not be possible to return only the asked channels.
Parameters
----------
region
A 2D region at a given downsample
out_width
The asked output width (best-effort)
out_height
The asked output height (best-effort)
c
The asked channel index(es).
If not set, all channels are considered.
z
The asked z-slice index. Image formats without Z-stack support
can safely ignore this parameter. Behavior is undetermined if `z`
is not set for an image format with Z-stack support.
t
The asked timepoint index. Image formats without time support
can safely ignore this parameter. Behavior is undetermined if `t`
is not set for an image format with time support.
Returns
-------
RawImagePixels
"""
raise NotImplementedError()
@abstractmethod
def read_tile(
self, tile: Tile,
c: Optional[Union[int, List[int]]] = None, z: Optional[int] = None, t: Optional[int] = None
) -> RawImagePixels:
"""
Get an image tile. It is a particular case of `read_window` where the
width and height of the region described by the tile at its downsample
match the asked output dimensions. As the tile is linked to a pyramid
tier, the tile downsample matches the downsample of a tier in the image
pyramid.
Output dimensions correspond to the tile width and height.
Returned channels are best-effort, that is, depending on the format
and the underlying library used to extract pixels from the image format,
it may or may not be possible to return only the asked channels.
Note: non tiled formats can fallback on `read_window`.
Parameters
----------
tile
A 2D region at a given downsample (linked to a pyramid tier)
c
The asked channel index(es).
If not set, all channels are considered.
z
The asked z-slice index. Image formats without Z-stack support
can safely ignore this parameter. Behavior is undetermined if `z`
is not set for an image format with Z-stack support.
t
The asked timepoint index. Image formats without time support
can safely ignore this parameter. Behavior is undetermined if `t`
is not set for an image format with time support.
Returns
-------
RawImagePixels
"""
raise NotImplementedError()
def read_label(self, out_width: int, out_height: int) -> Optional[RawImagePixels]:
"""
Get a precomputed image label whose output dimensions are the nearest
possible to asked output dimensions.
Output dimensions are best-effort, that is, depending on the format,
the image pyramid characteristics, and the underlying library used to
extract pixels from the image format, it may or may not be possible to
return a label at the asked output dimensions. The implementation
SHOULD try to return the nearest possible label using format
capabilities (such as shrink on load features) but MUST NOT perform any
resize operation after read (in that case, an optimized resize operator
is used in post-processing). In all cases:
* `true_out_width >= out_width`
* `true_out_height >= out_height`
Parameters
----------
out_width
The asked output width (best-effort)
out_height
The asked output height (best-effort)
Returns
-------
RawImagePixels
"""
return None
def read_macro(self, out_width: int, out_height: int) -> Optional[RawImagePixels]:
"""
Get a precomputed image macro whose output dimensions are the nearest
possible to asked output dimensions.
Output dimensions are best-effort, that is, depending on the format,
the image pyramid characteristics, and the underlying library used to
extract pixels from the image format, it may or may not be possible to
return a macro at the asked output dimensions. The implementation
SHOULD try to return the nearest possible macro using format
capabilities (such as shrink on load features) but MUST NOT perform any
resize operation after read (in that case, an optimized resize operator
is used in post-processing). In all cases:
* `true_out_width >= out_width`
* `true_out_height >= out_height`
Parameters
----------
out_width
The asked output width (best-effort)
out_height
The asked output height (best-effort)
Returns
-------
RawImagePixels
"""
return None
def _concrete_channel_indexes(
self, channels: Optional[Union[int, List[int]]]
) -> Tuple[list, list]:
if channels is None:
channels = np.arange(self.format.main_imd.n_channels)
else:
channels = np.asarray(ensure_list(channels))
spp = self.format.main_imd.n_samples
cc_idxs = channels // spp
s_idxs = channels % spp
return cc_idxs, s_idxs
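# --- Hedged illustration (not part of the original module) ---
# A minimal sketch, assuming a plain power-of-two pyramid, of the "best-effort"
# rule spelled out in the read_window() docstring: pick the deepest downsample
# whose output still satisfies true_out_width >= out_width and
# true_out_height >= out_height; any remaining shrink happens in
# post-processing. The function name and the downsample list are assumptions.
def _example_pick_downsample(region_width: int, region_height: int,
                             out_width: int, out_height: int,
                             downsamples=(1, 2, 4, 8, 16)) -> int:
    best = 1
    for ds in sorted(downsamples):
        if region_width / ds >= out_width and region_height / ds >= out_height:
            best = ds
    return best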
| 40.559055
| 99
| 0.651912
|
795021749e83ee84c871792971aee5703ff86515
| 4,807
|
py
|
Python
|
sdk/keyvault/azure-keyvault-secrets/samples/hello_world_async.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/keyvault/azure-keyvault-secrets/samples/hello_world_async.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-secrets/samples/hello_world_async.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import datetime
import os
import asyncio
from azure.keyvault.secrets.aio import SecretClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-secrets and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic CRUD operations on a vault(secret) resource for Azure Key Vault
#
# 1. Create a new secret (set_secret)
#
# 2. Get an existing secret (get_secret)
#
# 3. Update an existing secret's properties (update_secret_properties)
#
# 4. Delete a secret (delete_secret)
#
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a secret client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = SecretClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a secret holding bank account credentials valid for 1 year.
# if the secret already exists in the key vault, then a new version of the secret is created.
print("\n.. Create Secret")
expires_on = datetime.datetime.utcnow() + datetime.timedelta(days=365)
secret = await client.set_secret("helloWorldSecretName", "helloWorldSecretValue", expires_on=expires_on)
print("Secret with name '{0}' created with value '{1}'".format(secret.name, secret.value))
print("Secret with name '{0}' expires on '{1}'".format(secret.name, secret.properties.expires_on))
# Let's get the bank secret using its name
print("\n.. Get a Secret by name")
bank_secret = await client.get_secret(secret.name)
print("Secret with name '{0}' was found with value '{1}'.".format(bank_secret.name, bank_secret.value))
# After one year, the bank account is still active, we need to update the expiry time of the secret.
# The update method can be used to update the expiry attribute of the secret. It cannot be used to update
# the value of the secret.
print("\n.. Update a Secret by name")
expires_on = bank_secret.properties.expires_on + datetime.timedelta(days=365)
updated_secret_properties = await client.update_secret_properties(secret.name, expires_on=expires_on)
print(
"Secret with name '{0}' was updated on date '{1}'".format(
updated_secret_properties.name, updated_secret_properties.updated_on
)
)
print(
"Secret with name '{0}' was updated to expire on '{1}'".format(
updated_secret_properties.name, updated_secret_properties.expires_on
)
)
# Bank forced a password update for security purposes. Let's change the value of the secret in the key vault.
# To achieve this, we need to create a new version of the secret in the key vault. The update operation cannot
# change the value of the secret.
new_secret = await client.set_secret(secret.name, "newSecretValue")
print("Secret with name '{0}' created with value '{1}'".format(new_secret.name, new_secret.value))
# The bank account was closed, need to delete its credentials from the Key Vault.
print("\n.. Deleting Secret...")
deleted_secret = await client.delete_secret(secret.name)
print("Secret with name '{0}' was deleted.".format(deleted_secret.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| 49.05102
| 125
| 0.641148
|
795021a26002835bc0bcb4af81922439e38bf0ca
| 52,954
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/scalar/period/test_period.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
venv/Lib/site-packages/pandas/tests/scalar/period/test_period.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
venv/Lib/site-packages/pandas/tests/scalar/period/test_period.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
from datetime import date, datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas._testing as tm
class TestPeriodConstruction:
def test_construction(self):
i1 = Period("1/1/2005", freq="M")
i2 = Period("Jan 2005")
assert i1 == i2
i1 = Period("2005", freq="A")
i2 = Period("2005")
i3 = Period("2005", freq="a")
assert i1 == i2
assert i1 == i3
i4 = Period("2005", freq="M")
i5 = Period("2005", freq="m")
msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
i1 != i4
assert i4 == i5
i1 = Period.now("Q")
i2 = Period(datetime.now(), freq="Q")
i3 = Period.now("q")
assert i1 == i2
assert i1 == i3
i1 = Period("1982", freq="min")
i2 = Period("1982", freq="MIN")
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq="D")
i2 = Period("3/1/2005", freq="D")
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq="d")
assert i1 == i3
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
msg = "Must supply freq for ordinal value"
with pytest.raises(ValueError, match=msg):
Period(ordinal=200701)
msg = "Invalid frequency: X"
with pytest.raises(ValueError, match=msg):
Period("2007-1-1", freq="X")
# GH#34703 tuple freq disallowed
with pytest.raises(TypeError, match="pass as a string instead"):
Period("1982", freq=("Min", 1))
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period("3/10/12", freq="B")
i2 = Period("3/10/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/11/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/12/12", freq="D")
assert i1 == i2.asfreq("B")
i3 = Period("3/10/12", freq="b")
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq="B")
i2 = Period("3/12/12", freq="B")
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq="Q")
i2 = Period("1/1/2005", freq="Q")
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq="Q")
i2 = Period("9/1/2005", freq="Q")
assert i1 == i2
i1 = Period("2005Q1")
i2 = Period(year=2005, quarter=1, freq="Q")
i3 = Period("2005q1")
assert i1 == i2
assert i1 == i3
i1 = Period("05Q1")
assert i1 == i2
lower = Period("05q1")
assert i1 == lower
i1 = Period("1Q2005")
assert i1 == i2
lower = Period("1q2005")
assert i1 == lower
i1 = Period("1Q05")
assert i1 == i2
lower = Period("1q05")
assert i1 == lower
i1 = Period("4Q1984")
assert i1.year == 1984
lower = Period("4q1984")
assert i1 == lower
def test_construction_month(self):
expected = Period("2007-01", freq="M")
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period(200701, freq="M")
assert i1 == expected
i1 = Period(ordinal=200701, freq="M")
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
"1/1/2005", freq="M"
)
assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A")
assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
"3/10/12", freq="B"
)
assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
assert Period(
year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=1, freq="Q")
assert Period(
year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=2, freq="Q")
assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
year=2005, month=3, day=1, freq="D"
)
assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
year=2012, month=3, day=10, freq="B"
)
expected = Period("2005-03-01", freq="3D")
assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
assert Period(year=2005, month=3, day=1, freq="3D") == expected
assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
year=2012, month=3, day=10, freq="3B"
)
assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq="M")
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
def test_invalid_arguments(self):
msg = "Must supply freq for datetime value"
with pytest.raises(ValueError, match=msg):
Period(datetime.now())
with pytest.raises(ValueError, match=msg):
Period(datetime.now().date())
msg = "Value must be Period, string, integer, or datetime"
with pytest.raises(ValueError, match=msg):
Period(1.6, freq="D")
msg = "Ordinal must be an integer"
with pytest.raises(ValueError, match=msg):
Period(ordinal=1.6, freq="D")
msg = "Only value or ordinal but not both should be given but not both"
with pytest.raises(ValueError, match=msg):
Period(ordinal=2, value=1, freq="D")
msg = "If value is None, freq cannot be None"
with pytest.raises(ValueError, match=msg):
Period(month=1)
msg = "Given date string not likely a datetime"
with pytest.raises(ValueError, match=msg):
Period("-2000", "A")
msg = "day is out of range for month"
with pytest.raises(DateParseError, match=msg):
Period("0", "A")
msg = "Unknown datetime string format, unable to parse"
with pytest.raises(DateParseError, match=msg):
Period("1/1/-2000", "A")
def test_constructor_corner(self):
expected = Period("2007-01", freq="2M")
assert Period(year=2007, month=1, freq="2M") == expected
assert Period(None) is NaT
p = Period("2007-01-01", freq="D")
result = Period(p, freq="A")
exp = Period("2007", freq="A")
assert result == exp
def test_constructor_infer_freq(self):
p = Period("2007-01-01")
assert p.freq == "D"
p = Period("2007-01-01 07")
assert p.freq == "H"
p = Period("2007-01-01 07:10")
assert p.freq == "T"
p = Period("2007-01-01 07:10:15")
assert p.freq == "S"
p = Period("2007-01-01 07:10:15.123")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123000")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123400")
assert p.freq == "U"
def test_multiples(self):
result1 = Period("1989", freq="2A")
result2 = Period("1989", freq="A")
assert result1.ordinal == result2.ordinal
assert result1.freqstr == "2A-DEC"
assert result2.freqstr == "A-DEC"
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
freq = f"Q-{month}"
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp("3D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
freq = f"A-{month}"
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
daystr = f"2011-02-{num}"
freq = f"W-{day}"
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period("2011-01", freq="M")
res = Period._from_ordinal(p.ordinal, freq="M")
assert p == res
assert isinstance(res, Period)
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_construct_from_nat_string_and_freq(self, freq):
per = Period("NaT", freq=freq)
assert per is NaT
per = Period("NaT", freq="2" + freq)
assert per is NaT
per = Period("NaT", freq="3" + freq)
assert per is NaT
def test_period_cons_nat(self):
p = Period("nat", freq="W-SUN")
assert p is NaT
p = Period(iNaT, freq="D")
assert p is NaT
p = Period(iNaT, freq="3D")
assert p is NaT
p = Period(iNaT, freq="1D1H")
assert p is NaT
p = Period("NaT")
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
p1 = Period("2011-01", freq="3M")
p2 = Period("2011-01", freq="M")
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == "3M"
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == "M"
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -3M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-3M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0M")
def test_period_cons_combined(self):
p = [
(
Period("2011-01", freq="1D1H"),
Period("2011-01", freq="1H1D"),
Period("2011-01", freq="H"),
),
(
Period(ordinal=1, freq="1D1H"),
Period(ordinal=1, freq="1H1D"),
Period(ordinal=1, freq="H"),
),
]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == "25H"
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == "25H"
assert p3.freq == offsets.Hour()
assert p3.freqstr == "H"
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
msg = "Frequency must be positive, because it represents span: -25H"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1H1D")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1H1D")
msg = "Frequency must be positive, because it represents span: 0D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0D0H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="0D0H")
# You can only combine together day and intraday offsets
msg = "Invalid frequency: 1W1D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1W1D")
msg = "Invalid frequency: 1D1W"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
@pytest.mark.parametrize("hour", range(24))
def test_period_large_ordinal(self, hour):
# Issue #36430
# Integer overflow for Period over the maximum timestamp
p = pd.Period(ordinal=2562048 + hour, freq="1H")
assert p.hour == hour
class TestPeriodMethods:
def test_round_trip(self):
p = Period("2000Q1")
new_p = tm.round_trip_pickle(p)
assert new_p == p
def test_hash(self):
assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
# --------------------------------------------------------------
# to_timestamp
@pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"])
def test_to_timestamp_tz_arg(self, tzstr):
# GH#34522 tz kwarg deprecated
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr)
exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
@pytest.mark.parametrize(
"tzstr",
["dateutil/Europe/Brussels", "dateutil/Asia/Tokyo", "dateutil/US/Pacific"],
)
def test_to_timestamp_tz_arg_dateutil(self, tzstr):
tz = maybe_get_tz(tzstr)
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
def test_to_timestamp_tz_arg_dateutil_from_string(self):
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels")
assert p.tz == dateutil_gettz("Europe/Brussels")
def test_to_timestamp_mult(self):
p = Period("2011-01", freq="M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
p = Period("2011-01", freq="3M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
def test_to_timestamp(self):
p = Period("1982", freq="A")
start_ts = p.to_timestamp(how="S")
aliases = ["s", "StarT", "BEGIn"]
for a in aliases:
assert start_ts == p.to_timestamp("D", how=a)
        # freq with mult should not affect the result
assert start_ts == p.to_timestamp("3D", how=a)
end_ts = p.to_timestamp(how="E")
aliases = ["e", "end", "FINIsH"]
for a in aliases:
assert end_ts == p.to_timestamp("D", how=a)
assert end_ts == p.to_timestamp("3D", how=a)
from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"]
def _ex(p):
if p.freq == "B":
return p.start_time + Timedelta(days=1, nanoseconds=-1)
return Timestamp((p + p.freq).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period("1982", freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how="S")
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period("1985", freq="A")
result = p.to_timestamp("H", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("3H", how="end")
assert result == expected
result = p.to_timestamp("T", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("2T", how="end")
assert result == expected
result = p.to_timestamp(how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp("H", how="start")
assert result == expected
result = p.to_timestamp("T", how="start")
assert result == expected
result = p.to_timestamp("S", how="start")
assert result == expected
result = p.to_timestamp("3H", how="start")
assert result == expected
result = p.to_timestamp("5S", how="start")
assert result == expected
def test_to_timestamp_business_end(self):
per = pd.Period("1990-01-05", "B") # Friday
result = per.to_timestamp("B", how="E")
expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize(
"ts, expected",
[
("1970-01-01 00:00:00", 0),
("1970-01-01 00:00:00.000001", 1),
("1970-01-01 00:00:00.00001", 10),
("1970-01-01 00:00:00.499", 499000),
("1999-12-31 23:59:59.999", 999000),
("1999-12-31 23:59:59.999999", 999999),
("2050-12-31 23:59:59.5", 500000),
("2050-12-31 23:59:59.500001", 500001),
("2050-12-31 23:59:59.123456", 123456),
],
)
@pytest.mark.parametrize("freq", [None, "us", "ns"])
def test_to_timestamp_microsecond(self, ts, expected, freq):
# GH 24444
result = Period(ts).to_timestamp(freq=freq).microsecond
assert result == expected
# --------------------------------------------------------------
# Rendering: __repr__, strftime, etc
def test_repr(self):
p = Period("Jan-2000")
assert "2000-01" in repr(p)
p = Period("2000-12-15")
assert "2000-12-15" in repr(p)
def test_repr_nat(self):
p = Period("nat", freq="M")
assert repr(NaT) in repr(p)
def test_millisecond_repr(self):
p = Period("2000-01-01 12:15:02.123")
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period("2000-01-01 12:15:02.123567")
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
# GH#3363
p = Period("2000-1-1 12:34:12", freq="S")
res = p.strftime("%Y-%m-%d %H:%M:%S")
assert res == "2000-01-01 12:34:12"
assert isinstance(res, str)
class TestPeriodProperties:
"""Test properties such as year, month, weekday, etc...."""
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_is_leap_year(self, freq):
# GH 13727
p = Period("2000-01-01 00:00:00", freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period("1999-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
p = Period("2004-01-01 00:00:00", freq=freq)
assert p.is_leap_year
p = Period("2100-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="M")
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_freq_str(self):
i1 = Period("1982", freq="Min")
assert i1.freq == offsets.Minute()
assert i1.freqstr == "T"
def test_period_deprecated_freq(self):
cases = {
"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
}
msg = INVALID_FREQ_ERR_MSG
for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period("2016-03-01 09:00", freq=freq)
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq=freq)
            # check that supported freq aliases still work
p1 = Period("2016-03-01 09:00", freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def test_start_time(self):
freq_lst = ["A", "Q", "M", "D", "H", "T", "S"]
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period("2012", freq=f)
assert p.start_time == xp
assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="Q")
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period("2012", freq="M")
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="D")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period("2012", freq="H")
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="B")
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period("2012", freq="W")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period("2012", freq="15D")
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period("2012", freq="1D1H")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="1H1D")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_end_time_business_friday(self):
# GH#34449
per = Period("1990-01-05", "B")
result = per.end_time
expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1)
assert result == expected
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
        # Test properties on Periods with annual frequency.
a_date = Period(freq="A", year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq="M", year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq="W", year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq="B", year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
d_date = Period(freq="D", year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq="H", year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq="2H", year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert (
Period(freq="H", year=2012, month=2, day=1, hour=0).days_in_month == 29
)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert (
Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
== 29
)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert (
Period(
freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
).days_in_month
== 29
)
class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"
with pytest.raises(ValueError, match=msg):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestPeriodComparisons:
def test_comparison_same_period_different_object(self):
# Separate Period objects for the same period
left = Period("2000-01", "M")
right = Period("2000-01", "M")
assert left == right
assert left >= right
assert left <= right
assert not left < right
assert not left > right
def test_comparison_same_freq(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
assert not jan == feb
assert jan != feb
assert jan < feb
assert jan <= feb
assert not jan > feb
assert not jan >= feb
def test_comparison_mismatched_freq(self):
jan = Period("2000-01", "M")
day = Period("2012-01-01", "D")
msg = r"Input has different freq=D from Period\(freq=M\)"
with pytest.raises(IncompatibleFrequency, match=msg):
jan == day
with pytest.raises(IncompatibleFrequency, match=msg):
jan != day
with pytest.raises(IncompatibleFrequency, match=msg):
jan < day
with pytest.raises(IncompatibleFrequency, match=msg):
jan <= day
with pytest.raises(IncompatibleFrequency, match=msg):
jan > day
with pytest.raises(IncompatibleFrequency, match=msg):
jan >= day
def test_comparison_invalid_type(self):
jan = Period("2000-01", "M")
assert not jan == 1
assert jan != 1
int_or_per = "'(Period|int)'"
msg = f"not supported between instances of {int_or_per} and {int_or_per}"
for left, right in [(jan, 1), (1, jan)]:
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
def test_sort_periods(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
mar = Period("2000-03", "M")
periods = [mar, jan, feb]
correctPeriods = [jan, feb, mar]
assert sorted(periods) == correctPeriods
def test_period_cmp_nat(self):
p = Period("2011-01-01", freq="D")
t = Timestamp("2011-01-01")
        # confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [
(NaT, p),
(p, NaT),
(NaT, t),
(t, NaT),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestArithmetic:
def test_sub_delta(self):
left, right = Period("2011", freq="A"), Period("2007", freq="A")
result = left - right
assert result == 4 * right.freq
msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
left - Period("2007-01", freq="M")
def test_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_add_sub_nat(self):
# GH#13071
p = Period("2011-01", freq="M")
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
def test_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = "|".join(
[
r"unsupported operand type\(s\)",
"can only concatenate str",
"must be str, not Period",
]
)
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
ids = ["identity", "Series", "Index"]
@pytest.mark.parametrize("lbox", boxes, ids=ids)
@pytest.mark.parametrize("rbox", boxes, ids=ids)
def test_add_timestamp_raises(self, rbox, lbox):
# GH#17983
ts = Timestamp("2017")
per = Period("2017", freq="M")
# We may get a different message depending on which class raises
# the error.
msg = "|".join(
[
"cannot add",
"unsupported operand",
"can only operate on a",
"incompatible type",
"ufunc add cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
lbox(ts) + rbox(per)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(ts)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(per)
def test_sub(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
def test_add_offset(self):
# freq is DateOffset
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
exp = Period("2013", freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
exp = Period("2011-05", freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period("2012-03", freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
exp = Period("2011-04-06", freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period("2011-04-02", freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + np.timedelta64(2, "D") == exp
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
np.timedelta64(2, "D") + p
exp = Period("2011-04-02", freq=freq)
assert p + np.timedelta64(3600 * 24, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600 * 24, "s") + p
exp = Period("2011-03-30", freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
exp = Period("2011-04-03 09:00", freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period("2011-04-01 12:00", freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert p + np.timedelta64(3, "h") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3, "h") + p
exp = Period("2011-04-01 10:00", freq=freq)
assert p + np.timedelta64(3600, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600, "s") + p
exp = Period("2011-04-01 11:00", freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period("2011-04-05 12:00", freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
def test_sub_offset(self):
# freq is DateOffset
msg = "Input has different freq|Input cannot be converted to Period"
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
assert p - offsets.YearEnd(2) == Period("2009", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
assert p - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
assert p - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
assert p - offsets.Day(5) == Period("2011-03-27", freq=freq)
assert p - offsets.Hour(24) == Period("2011-03-31", freq=freq)
assert p - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
assert p - np.timedelta64(3600 * 24, "s") == Period("2011-03-31", freq=freq)
assert p - timedelta(-2) == Period("2011-04-03", freq=freq)
assert p - timedelta(hours=48) == Period("2011-03-30", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
assert p - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
assert p - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3600, "s") == Period(
"2011-04-01 08:00", freq=freq
)
assert p - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
"2011-03-28 06:00", freq=freq
)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_period_addsub_nat(self, freq):
per = Period("2011-01", freq=freq)
# For subtraction, NaT is treated as another Period object
assert NaT - per is NaT
assert per - NaT is NaT
# For addition, NaT is treated as offset-like
assert NaT + per is NaT
assert per + NaT is NaT
def test_period_ops_offset(self):
p = Period("2011-04-01", freq="D")
result = p + offsets.Day()
exp = Period("2011-04-02", freq="D")
assert result == exp
result = p - offsets.Day(2)
exp = Period("2011-03-30", freq="D")
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
p + offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
p - offsets.Hour(2)
def test_period_immutable():
# see gh-17116
msg = "not writable"
per = Period("2014Q1")
with pytest.raises(AttributeError, match=msg):
per.ordinal = 14
freq = per.freq
with pytest.raises(AttributeError, match=msg):
per.freq = 2 * freq
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
assert per1.day == 7
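# A minimal, self-contained sketch (not collected by pytest and never called by
# the suite): it illustrates the start_time/end_time semantics asserted in
# test_end_time above, assuming a pandas version where Period.end_time is the
# last nanosecond of the span.
def _example_period_span():
    import pandas as pd

    p = pd.Period("2012", freq="A")
    # start_time is the first instant of the period ...
    assert p.start_time == pd.Timestamp("2012-01-01")
    # ... and end_time is one nanosecond before the next period begins.
    assert p.end_time == pd.Timestamp("2013-01-01") - pd.Timedelta(1, "ns")
    return p.start_time, p.end_time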
def test_negone_ordinals():
freqs = ["A", "M", "Q", "D", "H", "T", "S"]
period = Period(ordinal=-1, freq="D")
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
assert period.year == 1969
period = Period(ordinal=-1, freq="B")
repr(period)
period = Period(ordinal=-1, freq="W")
repr(period)
| 34.678454 | 88 | 0.535295 |
795021aefbc2348d21d7faf5f4cc9f4b96290793 | 5,042 | py | Python | review_classes.py | UstymHanyk/BookGoogle | 20f2825093c4676d05ca88138b6c140f1f8e174a | ["MIT"] | null | null | null | review_classes.py | UstymHanyk/BookGoogle | 20f2825093c4676d05ca88138b6c140f1f8e174a | ["MIT"] | null | null | null | review_classes.py | UstymHanyk/BookGoogle | 20f2825093c4676d05ca88138b6c140f1f8e174a | ["MIT"] | null | null | null |
"""
Module contains class 'Review', which contains information
about book review, written in english, and class 'ReviewList',
which keeps all book reviews.
"""
from nltk.sentiment import SentimentIntensityAnalyzer
from typing import List
from langdetect import detect
from langdetect.lang_detect_exception import LangDetectException
class Review:
"""
Information about the review.
"""
def __init__(self, info_tuple):
"""
Initializes the class.
:type info_tuple: tuple
:param info_tuple: Information about the review.
"""
self.author = info_tuple[0]
self.rating = info_tuple[1]
self.text = info_tuple[2]
self.length = len(info_tuple[2])
self.neutrality = self.calc_neutrality()
def calc_neutrality(self):
"""
        Calculates the percentage of neutral language
        in the review text.
        :rtype: float
        :returns: Neutrality score between 0 and 1.
"""
sia_object = SentimentIntensityAnalyzer()
return sia_object.polarity_scores(self.text)['neu']
def __lt__(self, other) -> bool:
"""
        Compares reviews' ratings and reliability
        by three aspects:
        1 - rating
        2 - amount of neutral language
        3 - length of the text
        This method enables comparing and sorting reviews.
        :type other: Review
        :param other: Another review.
        :rtype: bool
        :returns: True if self ranks below other.
"""
        if self.rating == other.rating:
            if self.neutrality == other.neutrality:
                return self.length < other.length
            return self.neutrality < other.neutrality
        return self.rating < other.rating
def __repr__(self) -> str:
"""Returns the string to represent the
class."""
return f"username: {self.author}\nrating: \
{self.rating * '⋆'}\n{self.text}\ntotal length: {self.length}\n\
neutrality of text: {self.neutrality}\n"
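# A hedged usage sketch (module-level helper, never called from this module): it
# assumes the NLTK VADER lexicon is available (nltk.download("vader_lexicon")),
# because every Review runs SentimentIntensityAnalyzer on construction; the
# sample texts below are placeholders, not real reviews.
def _example_review_ordering():
    weaker = Review(("bob", 2, "Weak plot. I could not finish this book at all."))
    stronger = Review(("alice", 5, "Great book. Loved every single chapter of it."))
    # Ordering compares rating first, then neutrality, then text length.
    return weaker < stronger  # True, because rating 2 < rating 5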
class ReviewList:
"""
    Keeps and sorts Review objects.
"""
def __init__(self):
"""Initializes the class."""
self.reviews = []
def __repr__(self) -> str:
"""
        Returns the string representation of the
        object.
        :rtype: str
        :returns: Concatenated representations of all stored reviews.
"""
final_str = ''
for review in self.reviews:
final_str += str(review)
return final_str
def clear(self):
"""
        Clears itself and returns all of the removed data.
        :rtype: ReviewList
        :returns: A ReviewList holding the previously stored reviews.
"""
deleted_data = ReviewList()
deleted_data.reviews = self.reviews
self.reviews = []
return deleted_data
def add_review(self, review):
"""
Adds a new review if it's written in English.
:type review: Review
:param review: New review.
"""
try:
if detect(review.text.split('.')[0]) == 'en':
self.reviews.append(review)
except LangDetectException:
print(f"Language of ({review.text.split('.')[0]}) could not be detect")
def reliability_sort(self):
"""
        Sorts reviews by their ratings, neutrality and
        length in descending order.
        Relies on the __lt__ method defined on the
        Review class.
"""
self.reviews.sort(reverse=True)
    def get_mood_range(self, mood_lst=(5, 3, 2)) -> List[Review]:
        """
        Returns the list of the three most reliable
        reviews out of all given.
        Sorts the reviews and returns a list with the first
        positive, neutral and negative reviews (ratings 5, 3
        and 2 respectively), i.e. the most reliable review
        from every category.
        If there are no reviews with rating 5 or 2, the method
        falls back to reviews with rating 4 or 1.
        :rtype: List[Review]
        :returns: List of Review objects.
"""
self.reliability_sort()
result = []
index = 0
while index < len(mood_lst):
for review in self.reviews:
if index < len(mood_lst):
if review.rating == mood_lst[index]:
result.append(review)
index += 1
index += 1
if len(result) < 3 and len(mood_lst) > 2:
if any(review.rating == 2 for review in result) is False and \
any(review.rating == 5 for review in result) is False:
result += self.get_mood_range(mood_lst=[4, 1])
elif not any(review.rating == 5 for review in result):
result += self.get_mood_range(mood_lst=[4])
elif not any(review.rating == 2 for review in result):
result += self.get_mood_range(mood_lst=(1,))
result.sort(reverse=True)
return result
| 30.191617 | 83 | 0.57616 |
795021d76cc752df2b8878751822432b1a7ed04f | 15,352 | py | Python | gmprocess/io/knet/knet_fetcher.py | ftbernales/groundmotion-processing | 5be88da75e7168bd2421973d6f1e54a91c679dc8 | ["Unlicense"] | null | null | null | gmprocess/io/knet/knet_fetcher.py | ftbernales/groundmotion-processing | 5be88da75e7168bd2421973d6f1e54a91c679dc8 | ["Unlicense"] | null | null | null | gmprocess/io/knet/knet_fetcher.py | ftbernales/groundmotion-processing | 5be88da75e7168bd2421973d6f1e54a91c679dc8 | ["Unlicense"] | null | null | null |
# stdlib imports
from datetime import datetime, timedelta
import re
from collections import OrderedDict
import tempfile
import os.path
import tarfile
import glob
import shutil
import logging
import urllib
# third party imports
import pytz
import numpy as np
import requests
from bs4 import BeautifulSoup
from openquake.hazardlib.geo.geodetic import geodetic_distance
from obspy.core.utcdatetime import UTCDateTime
# local imports
from gmprocess.io.fetcher import DataFetcher, _get_first_value
from gmprocess.io.knet.core import read_knet
from gmprocess.core.streamcollection import StreamCollection
from gmprocess.utils.config import get_config
JST_OFFSET = 9 * 3600 # Japan standard time is UTC + 9
SEARCH_URL = 'http://www.kyoshin.bosai.go.jp/cgi-bin/kyoshin/quick/list_eqid_en.cgi?1+YEAR+QUARTER'
RETRIEVE_URL = 'http://www.kyoshin.bosai.go.jp/cgi-bin/kyoshin/auth/makearc'
# http://www.kyoshin.bosai.go.jp/cgi-bin/kyoshin/auth/makearc?formattype=A&eqidlist=20180330081700%2C20180330000145%2C20180330081728%2C1%2C%2Fkyoshin%2Fpubdata%2Fall%2F1comp%2F2018%2F03%2F20180330081700%2F20180330081700.all_acmap.png%2C%2Fkyoshin%2Fpubdata%2Fknet%2F1comp%2F2018%2F03%2F20180330081700%2F20180330081700.knt_acmap.png%2C%2Fkyoshin%2Fpubdata%2Fkik%2F1comp%2F2018%2F03%2F20180330081700%2F20180330081700.kik_acmap.png%2CHPRL&datanames=20180330081700%3Balldata&datakind=all
CGIPARAMS = OrderedDict()
CGIPARAMS['formattype'] = 'A'
CGIPARAMS['eqidlist'] = ''
CGIPARAMS['datanames'] = ''
CGIPARAMS['alldata'] = None
CGIPARAMS['datakind'] = 'all'
QUARTERS = {1: 1, 2: 1, 3: 1,
4: 4, 5: 4, 6: 4,
7: 7, 8: 7, 9: 7,
10: 10, 11: 10, 12: 10}
# 2019/03/13-13:48:00.00
TIMEPAT = r'[0-9]{4}/[0-9]{2}/[0-9]{2}-[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{2}'
LATPAT = r'[0-9]{2}\.[0-9]{2}N'
LONPAT = r'[0-9]{3}\.[0-9]{2}E'
DEPPAT = '[0-9]{3}km'
MAGPAT = r'M[0-9]{1}\.[0-9]{1}'
TIMEFMT = '%Y/%m/%d-%H:%M:%S.%f'
# default values for this fetcher
# if None specified in constructor, AND no parameters specified in
# config, then use these.
RADIUS = 100 # kilometers
DT = 60 # seconds
DDEPTH = 30 # km
DMAG = 0.3
URL_ERROR_CODE = 200 # if we get this from a request, we're good
# create a dictionary of magnitudes and distances. These will be used with
# this fetcher to restrict the number of stations from Japan that are processed
# and stored. The distances are derived from an empirical analysis of active
# region earthquakes. In a small sample size, this seems to reduce the number
# of Japanese stations by roughly 25%.
MAGS = OrderedDict()
MAGS[5.5] = 122
MAGS[6.5] = 288
MAGS[7.5] = 621
MAGS[9.9] = 1065
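# A small illustrative helper (hedged sketch, not used by the fetcher itself):
# it mirrors the lookup in retrieveData() below, where the first magnitude bin
# larger than the event magnitude gives the distance (km) beyond which stations
# are dropped.
def _example_threshold_distance(magnitude):
    for mag, tdistance in MAGS.items():
        if magnitude < mag:
            return tdistance
    return None  # e.g. _example_threshold_distance(6.0) -> 288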
class KNETFetcher(DataFetcher):
def __init__(self, time, lat, lon, depth, magnitude,
user=None, password=None, radius=None, dt=None, ddepth=None,
dmag=None, rawdir=None, config=None,
drop_non_free=True, stream_collection=True):
"""Create a KNETFetcher instance.
Download KNET/KikNet data from the Japanese NIED site:
http://www.kyoshin.bosai.go.jp/cgi-bin/kyoshin/quick/list_eqid_en.cgi
Args:
time (datetime):
Origin time.
lat (float):
Origin latitude.
lon (float):
Origin longitude.
depth (float):
Origin depth.
magnitude (float):
Origin magnitude.
user (str):
username for KNET/KikNET site.
password (str):
(Optional) password for site.
radius (float):
Search radius (km).
dt (float):
Search time window (sec).
ddepth (float):
Search depth window (km).
dmag (float):
Search magnitude window (magnitude units).
rawdir (str):
Path to location where raw data will be stored. If not
specified, raw data will be deleted.
config (dict):
Dictionary containing configuration.
If None, retrieve global config.
drop_non_free (bool):
Option to ignore non-free-field (borehole, sensors on
structures, etc.)
stream_collection (bool):
Construct and return a StreamCollection instance?
"""
# what values do we use for search thresholds?
# In order of priority:
# 1) Not-None values passed in constructor
# 2) Configured values
# 3) DEFAULT values at top of the module
if config is None:
config = get_config()
cfg_radius = None
cfg_dt = None
cfg_ddepth = None
cfg_dmag = None
cfg_user = None
cfg_password = None
if 'fetchers' in config:
if 'KNETFetcher' in config['fetchers']:
fetch_cfg = config['fetchers']['KNETFetcher']
if 'radius' in fetch_cfg:
cfg_radius = float(fetch_cfg['radius'])
if 'dt' in fetch_cfg:
cfg_dt = float(fetch_cfg['dt'])
if 'ddepth' in fetch_cfg:
cfg_ddepth = float(fetch_cfg['ddepth'])
if 'dmag' in fetch_cfg:
cfg_dmag = float(fetch_cfg['dmag'])
if 'user' in fetch_cfg:
cfg_user = fetch_cfg['user']
if 'password' in fetch_cfg:
cfg_password = fetch_cfg['password']
radius = _get_first_value(radius, cfg_radius, RADIUS)
dt = _get_first_value(dt, cfg_dt, DT)
ddepth = _get_first_value(ddepth, cfg_ddepth, DDEPTH)
dmag = _get_first_value(dmag, cfg_dmag, DMAG)
# for knet/kiknet, username/password is required
if user is None or password is None:
# check to see if those values are configured
if cfg_user and cfg_password:
user = cfg_user
password = cfg_password
else:
fmt = ('Username/password are required to retrieve '
'KNET/KikNET data.')
raise Exception(fmt)
if user == 'USERNAME' or password == 'PASSWORD':
fmt = ('Username/password are required to retrieve KNET/KikNET\n'
'data. This tool can download data from the Japanese NIED\n'
'website. However, for this to work you will first need \n'
'to obtain a username and password from this website:\n'
'https://hinetwww11.bosai.go.jp/nied/registration/?LANG=en\n'
'Then create a custom config file by running the gmsetup\n'
'program, and edit the fetchers:KNETFetcher section\n'
'to use your username and password.')
raise Exception(fmt)
        # Allow the user to turn station restriction on or off. Restricting
        # saves time and is unlikely to discard significant data.
self.restrict_stations = \
config['fetchers']['KNETFetcher']['restrict_stations']
self.user = user
self.password = password
tz = pytz.UTC
if isinstance(time, UTCDateTime):
time = time.datetime
self.time = tz.localize(time)
self.lat = lat
self.lon = lon
self.radius = radius
self.dt = dt
self.rawdir = rawdir
self.depth = depth
self.magnitude = magnitude
self.ddepth = ddepth
self.dmag = dmag
self.jptime = self.time + timedelta(seconds=JST_OFFSET)
xmin = 127.705
xmax = 147.393
ymin = 29.428
ymax = 46.109
# this announces to the world the valid bounds for this fetcher.
self.BOUNDS = [xmin, xmax, ymin, ymax]
self.drop_non_free = drop_non_free
self.stream_collection = stream_collection
def getMatchingEvents(self, solve=True):
"""Return a list of dictionaries matching input parameters.
Args:
solve (bool):
If set to True, then this method
should return a list with a maximum of one event.
Returns:
list: List of event dictionaries, with fields:
- time Event time (UTC)
- lat Event latitude
- lon Event longitude
- depth Event depth
- mag Event magnitude
"""
jpyear = str(self.jptime.year)
jpquarter = str(QUARTERS[self.jptime.month])
if len(jpquarter) == 1:
jpquarter = '0' + jpquarter
url = SEARCH_URL.replace('YEAR', jpyear)
url = url.replace('QUARTER', jpquarter)
req = requests.get(url)
data = req.text
soup = BeautifulSoup(data, features="lxml")
select = soup.find('select')
options = select.find_all('option')
times = []
lats = []
lons = []
depths = []
mags = []
values = []
for option in options:
if 'Data not found' in option.text:
break
eventstr = option.contents[0]
timestr = re.search(TIMEPAT, eventstr).group()
latstr = re.search(LATPAT, eventstr).group()
lonstr = re.search(LONPAT, eventstr).group()
depstr = re.search(DEPPAT, eventstr).group()
magstr = re.search(MAGPAT, eventstr).group()
lat = float(latstr.replace('N', ''))
lon = float(lonstr.replace('E', ''))
depth = float(depstr.replace('km', ''))
mag = float(magstr.replace('M', ''))
etime = datetime.strptime(timestr, TIMEFMT)
times.append(np.datetime64(etime))
lats.append(lat)
lons.append(lon)
depths.append(depth)
mags.append(mag)
values.append(option.get('value'))
events = []
if not len(times):
return events
times = np.array(times)
lats = np.array(lats)
lons = np.array(lons)
depths = np.array(depths)
mags = np.array(mags)
values = np.array(values)
distances = geodetic_distance(self.lon, self.lat, lons, lats)
didx = distances <= self.radius
jptime = np.datetime64(self.jptime)
# dtimes is in microseconds
dtimes = np.abs(jptime - times)
tidx = dtimes <= np.timedelta64(int(self.dt), 's')
etimes = times[didx & tidx]
elats = lats[didx & tidx]
elons = lons[didx & tidx]
edepths = depths[didx & tidx]
emags = mags[didx & tidx]
evalues = values[didx & tidx]
for etime, elat, elon, edep, emag, evalue in zip(etimes, elats,
elons, edepths,
emags, evalues):
jtime = UTCDateTime(str(etime))
utime = jtime - JST_OFFSET
edict = {'time': utime,
'lat': elat,
'lon': elon,
'depth': edep,
'mag': emag,
'cgi_value': evalue}
events.append(edict)
if solve and len(events) > 1:
event = self.solveEvents(events)
events = [event]
return events
def retrieveData(self, event_dict):
"""Retrieve data from NIED, turn into StreamCollection.
Args:
event (dict):
Best dictionary matching input event, fields as above
in return of getMatchingEvents().
Returns:
StreamCollection: StreamCollection object.
"""
rawdir = self.rawdir
if self.rawdir is None:
rawdir = tempfile.mkdtemp()
else:
if not os.path.isdir(rawdir):
os.makedirs(rawdir)
cgi_value = event_dict['cgi_value']
firstid = cgi_value.split(',')[0]
dtime = event_dict['time']
fname = dtime.strftime('%Y%m%d%H%M%S') + '.tar'
localfile = os.path.join(rawdir, fname)
url = RETRIEVE_URL
payload = {'formattype': ['A'],
'eqidlist': cgi_value,
'datanames': '%s;alldata' % firstid,
'datakind': ['all']}
logging.info('Downloading Japanese data into %s...' % localfile)
req = requests.get(url, params=payload,
auth=(self.user, self.password))
if req.status_code != URL_ERROR_CODE:
            raise urllib.error.HTTPError(url, req.status_code, req.text,
                                         req.headers, None)
else:
with open(localfile, 'wb') as f:
for chunk in req:
f.write(chunk)
logging.info('Finished downloading into %s...' % localfile)
# open the tarball, extract the kiknet/knet gzipped tarballs
tar = tarfile.open(localfile)
names = tar.getnames()
tarballs = []
for name in names:
if 'img' in name:
continue
ppath = os.path.join(rawdir, name)
tarballs.append(ppath)
tar.extract(name, path=rawdir)
tar.close()
# remove the tar file we downloaded
os.remove(localfile)
subdirs = []
for tarball in tarballs:
tar = tarfile.open(tarball, mode='r:gz')
if 'kik' in tarball:
subdir = os.path.join(rawdir, 'kiknet')
else:
subdir = os.path.join(rawdir, 'knet')
subdirs.append(subdir)
tar.extractall(path=subdir)
tar.close()
os.remove(tarball)
for subdir in subdirs:
gzfiles = glob.glob(os.path.join(subdir, '*.gz'))
for gzfile in gzfiles:
os.remove(gzfile)
if self.stream_collection:
streams = []
for subdir in subdirs:
datafiles = glob.glob(os.path.join(subdir, '*.*'))
for dfile in datafiles:
logging.info('Reading KNET/KikNet file %s...' % dfile)
streams += read_knet(dfile)
if self.rawdir is None:
shutil.rmtree(rawdir)
# Japan gives us a LOT of data, much of which is not useful as it
# is too far away. Use the following distance thresholds for
# different magnitude ranges, and trim streams that are beyond this
# distance.
threshold_distance = None
if self.restrict_stations:
for mag, tdistance in MAGS.items():
if self.magnitude < mag:
threshold_distance = tdistance
break
newstreams = []
for stream in streams:
slat = stream[0].stats.coordinates.latitude
slon = stream[0].stats.coordinates.longitude
distance = geodetic_distance(self.lon, self.lat, slon, slat)
if distance <= threshold_distance:
newstreams.append(stream)
stream_collection = StreamCollection(
streams=newstreams, drop_non_free=self.drop_non_free)
return stream_collection
else:
return None
| 37.171913 | 483 | 0.562663 |
7950238be69e9050aec2650a52e7398b2f01da37 | 676 | py | Python | Process_Resolver/CPU.py | selcox/Process-Resolver | a31efc5ea928eccf0312b50ef70927930e215afd | ["Apache-2.0"] | null | null | null | Process_Resolver/CPU.py | selcox/Process-Resolver | a31efc5ea928eccf0312b50ef70927930e215afd | ["Apache-2.0"] | null | null | null | Process_Resolver/CPU.py | selcox/Process-Resolver | a31efc5ea928eccf0312b50ef70927930e215afd | ["Apache-2.0"] | null | null | null |
import Constants
from FIFO import FIFO
from SJF import SJF
from SRTF import SRTF
from RR import RR
class CPU():
def __init__(self, algorithm, processes, quantum = -1):
self.processes = processes
self.start(algorithm, quantum)
def start(self, algorithm, quantum):
        if algorithm == Constants.FIFO:
            self.history = FIFO(self.processes).history
        elif algorithm == Constants.SJF:
            self.history = SJF(self.processes).history
        elif algorithm == Constants.SRTF:
            self.history = SRTF(self.processes).history
        elif algorithm == Constants.RR:
            self.history = RR(self.processes, quantum).history
| 32.190476 | 62 | 0.653846 |
79502468629b69ff04f977c208d8d96fb008a187 | 3,670 | py | Python | catkin_ws/src/uv_robot_ros/src/clientToServer.py | noidname01/UV_Robotic_Challenge-Software | cd284fb46019c3e0cb96db4e7b6fe07512a62e05 | ["MIT"] | 1 | 2020-08-17T12:43:04.000Z | 2020-08-17T12:43:04.000Z | catkin_ws/src/uv_robot_ros/src/clientToServer.py | noidname01/UV_Robotic_Challenge-Software | cd284fb46019c3e0cb96db4e7b6fe07512a62e05 | ["MIT"] | 3 | 2022-02-13T19:38:23.000Z | 2022-02-27T09:49:33.000Z | catkin_ws/src/uv_robot_ros/src/clientToServer.py | noidname01/UV_Robotic_Challenge-Software | cd284fb46019c3e0cb96db4e7b6fe07512a62e05 | ["MIT"] | 1 | 2020-08-18T13:24:23.000Z | 2020-08-18T13:24:23.000Z |
#!/usr/bin/env python
import socket
import time
import sys
import rospy
from uv_robot_ros.srv import cmdToRpi
#RPi's IP
#SERVER_IP = "192.168.43.35"
def cmd_to_rpi_client(actionType, dist_or_deg):
rospy.wait_for_service('cmdToRpiService')
try:
cmd_to_rpi = rospy.ServiceProxy('cmdToRpiService',cmdToRpi)
res = cmd_to_rpi(actionType, dist_or_deg)
return res.isComplete, res.errorMsg
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
return False, ""
def for_test():
while True:
cmd = input()
params = cmd.split(' ')
actionType, dist_or_deg = params[0], params[1]
isComplete, errorMsg = cmd_to_rpi_client(actionType, dist_or_deg)
if not isComplete:
print(errorMsg)
break
if __name__ == "__main__":
for_test()
# SERVER_IP = "192.168.0.203"
# SERVER_PORT = 8888
# print("Starting socket: TCP...")
# server_addr = (SERVER_IP, SERVER_PORT)
# socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# while True:
# try:
# print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
# socket_tcp.connect(server_addr)
# break
# except Exception:
# print("Can't connect to server,try it latter!")
# time.sleep(1)
# continue
# while True:
# try:
# data = socket_tcp.recv(512)
# if len(data)>0:
# print("Received: %s" % data.decode())
# command = input()
# while len(command) == 0:
# command = input()
# socket_tcp.send(command.encode())
# print("Command sent")
# time.sleep(0.01)
# continue
# except Exception as e:
# print(e)
# socket_tcp.close()
# socket_tcp=None
# sys.exit(1)
# import socket
# import time
# import sys
# import pyrealsense2 as rs
# #RPi's IP
# SERVER_IP = "192.168.43.194"
# SERVER_PORT = 8888
# # Create a context object. This object owns the handles to all connected realsense devices
# pipeline = rs.pipeline()
# pipeline.start()
# print("Starting socket: TCP...")
# server_addr = (SERVER_IP, SERVER_PORT)
# socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# while True:
# try:
# print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
# socket_tcp.connect(server_addr)
# break
# except Exception:
# print("Can't connect to server,try it latter!")
# time.sleep(1)
# continue
# # print("Please input gogo or stop to turn on/off the motor!")
# while True:
# try:
# frames = pipeline.wait_for_frames()
# depth = frames.get_depth_frame()
# if not depth: continue
# coverage = [0]*64
# for y in range(480):
# for x in range(640):
# dist = depth.get_distance(x, y)
# if 0 < dist and dist < 1:
# coverage[x//10] += 1
# if y%20 is 19:
# line = ""
# for c in coverage:
# line += " .:45678W"[c//25]
# coverage = [0]*64
# # print(line)
# data = socket_tcp.recv(4096)
# if len(data)>0:
# print("Received: %s" % data)
# socket_tcp.send(bytes(line,'utf-8'))
# time.sleep(0.01)
# continue
# except Exception as e:
# print(e)
# socket_tcp.close()
# socket_tcp=None
# sys.exit(1)
| 26.788321 | 92 | 0.543052 |
795025642023a3cb74f50cb0c798beb0615ad29b | 10,005 | py | Python | SamAppsUpdater.py | byethon/SamAppsUpdater | bda64c2792e582870872339d46d07eeae5e8b515 | ["MIT"] | null | null | null | SamAppsUpdater.py | byethon/SamAppsUpdater | bda64c2792e582870872339d46d07eeae5e8b515 | ["MIT"] | null | null | null | SamAppsUpdater.py | byethon/SamAppsUpdater | bda64c2792e582870872339d46d07eeae5e8b515 | ["MIT"] | null | null | null |
import os
import re
from urllib import request
import requests
import subprocess
import sys
regex_info = re.compile(r".*<resultMsg>(?P<msg>[^']*)</resultMsg>"
r".*<downloadURI><!\[CDATA\[(?P<uri>[^']*)\]\]></downloadURI>"
r".*<versionCode>(?P<vs_code>\d+)</versionCode>", re.MULTILINE | re.DOTALL)
mode=''
class bcolors:
HEADER = '\033[95m'
OKGREEN = '\033[92m'
OKBLUE = '\033[94m'
OKPURPLE = '\033[95m'
INFOYELLOW = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def devproc():
global model
devproc=subprocess.Popen(["adb","shell","getprop","ro.product.model"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
devout = devproc.stdout.read()
devout = devout.decode('utf-8')
devout = devout.strip()
model = devout
def andproc():
global sdk_ver
andproc=subprocess.Popen(["adb","shell","getprop","ro.build.version.sdk"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
andout = andproc.stdout.read()
andout = andout.decode('utf-8')
andout = andout.strip()
sdk_ver = andout
def modesel():
global mode
print("Select mode to use:")
print(" (1) : Quick mode")
print(" (2) : Normal mode(Only enabled apps)")
print(" (3) : All apps Mode")
print(" (0) : Exit")
mode = input(f"{bcolors.OKBLUE}Enter the number corresponding to the mode to be used: {bcolors.ENDC}")
exec()
def exec():
qmode=''
global mode,adbout,listfile
if(mode=='1'):
print('\n')
print("Select list to use:")
print(" (1) : Enabled Applist")
print(" (2) : Complete Applist")
print(" (0) : Go back to previous Mode selection")
qmode = input(f"{bcolors.OKBLUE}Enter the number corresponding to the mode to be used: {bcolors.ENDC}")
if(qmode=='1' or qmode=='2'):
print("\n Looking for updatable packages...")
if (os.path.exists(f".list-{model}-{sdk_ver}-Mode-{int(qmode)+1}") and os.stat(f".list-{model}-{sdk_ver}-Mode-{int(qmode)+1}").st_size != 0):
listfile= open(f".list-{model}-{sdk_ver}-Mode-{int(qmode)+1}","r")
listfile.seek(0,2)
listfile.seek(listfile.tell()-1,0)
if(listfile.read()=='%'):
listfile.seek(0,0)
listmode()
listfile.close()
else:
listfile.close()
print(f"List not populated, Fallback to Mode-{int(qmode)+1}")
mode=(f"{int(qmode)+1}")
exec()
else:
print(f"List not populated, Fallback to Mode-{int(qmode)+1}")
mode=(f"{int(qmode)+1}")
exec()
elif(qmode=='0'):
print(f"\n\t{bcolors.FAIL}RETURN:{bcolors.ENDC} Mode selection initiated\n")
modesel()
else:
print(f"\n\t{bcolors.FAIL}RETURN:{bcolors.ENDC} No or Illegal Input detected\n")
modesel()
elif(mode=='2'):
print("\n Looking for updatable packages...")
adbproc=subprocess.Popen(["adb","shell","pm","list","packages","-e","--show-versioncode","|","cut","-f2-3","-d:"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
adbout = adbproc.stdout.readlines()
listfile=open(f".list-{model}-{sdk_ver}-Mode-2","w")
directmode()
listfile.write('%')
listfile.close()
elif(mode=='3'):
print("\n Looking for updatable packages...")
adbproc=subprocess.Popen(["adb","shell","pm","list","packages","--show-versioncode","|","cut","-f2-3","-d:"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
adbout = adbproc.stdout.readlines()
listfile=open(f".list-{model}-{sdk_ver}-Mode-3","w")
directmode()
listfile.write('%')
listfile.close()
elif(mode=='0'):
sys.exit(f"\n\t{bcolors.FAIL}QUIT:{bcolors.ENDC} Program Aborted by User\n")
else:
sys.exit(f"\n\t{bcolors.FAIL}QUIT:{bcolors.ENDC} No or Illegal Input detected\n")
def directmode():
global package_name,versioncode,listfile
for pkginsinfo in adbout:
x=pkginsinfo.decode('utf-8')
x=x.strip()
x=x.split(' ')
package_name=x[0]
y=x[1].split(':')
versioncode=y[1]
print(f"\033[A {bcolors.OKBLUE}Looking for updatable packages...{bcolors.ENDC}")
loadanimate()
urlproc()
update()
listfile.flush()
def listmode():
global package_name,versioncode,listfile
lines = listfile.read()
lines = lines.split('$')
lines.pop()
for line in lines:
vercheck=subprocess.Popen(["adb","shell","pm","list","packages","--show-versioncode","|","grep","-w",line,"|","cut","-f3","-d:"],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
verinfo = vercheck.stdout.read()
verinfo = verinfo.decode('utf-8')
verinfo = verinfo.strip()
versioncode=verinfo
package_name=line
print(f"\033[A {bcolors.OKBLUE}Looking for updatable packages...{bcolors.ENDC}")
loadanimate()
urlproc()
update()
def loadanimate():
global i
if(i==0):
print(f'\033[A{bcolors.OKBLUE}⢿{bcolors.ENDC}')
elif(i==1):
print(f'\033[A{bcolors.OKBLUE}⣻{bcolors.ENDC}')
elif(i==2):
print(f'\033[A{bcolors.OKBLUE}⣽{bcolors.ENDC}')
elif(i==3):
print(f'\033[A{bcolors.OKBLUE}⣾{bcolors.ENDC}')
elif(i==4):
print(f'\033[A{bcolors.OKBLUE}⣷{bcolors.ENDC}')
elif(i==5):
print(f'\033[A{bcolors.OKBLUE}⣯{bcolors.ENDC}')
elif(i==6):
print(f'\033[A{bcolors.OKBLUE}⣟{bcolors.ENDC}')
elif(i==7):
print(f'\033[A{bcolors.OKBLUE}⡿{bcolors.ENDC}')
i=-1
i+=1
def insproc():
global insout
insproc=subprocess.Popen(["adb","install","-r",file[0]],stdout=subprocess.PIPE,stderr=subprocess.DEVNULL)
insout = insproc.stdout.readlines()
def update():
global errorcount,pkgcount,file,listfile
match = [m.groupdict() for m in regex_info.finditer(url)]
if not match:
# Showing error message from samsung servers
error_msg = re.compile(r"resultMsg>(.*)</resultMsg>").findall(url)
        while len(error_msg) == 0:
urlproc()
error_msg = re.compile(r"resultMsg>(.*)</resultMsg>").findall(url)
if (error_msg[0] !='Application is not approved as stub' and error_msg[0] !="Couldn't find your app which matches requested conditions. Please check distributed conditions of your app like device, country, mcc, mnc, csc, api level" and error_msg[0] !='Application is not allowed to use stubDownload' and error_msg[0] !='This call is unnecessary and blocked. Please contact administrator of GalaxyApps server.'):
errorcount+=1
print(f'\033[A{bcolors.OKPURPLE}⣿{bcolors.ENDC}')
print(f'\033[A{bcolors.FAIL} Looking for updatable packages... ERROR(%d): {bcolors.INFOYELLOW}"{error_msg[0]}"{bcolors.ENDC}'%(errorcount))
print('\033[A ')
return
return
match = match[0]
pkgcount+=1
print(f'\033[A{bcolors.OKPURPLE}⣿{bcolors.ENDC}')
print(f"\033[A {bcolors.OKPURPLE}Found(%d) %s{bcolors.ENDC}"%(pkgcount,package_name))
if(mode=='2' or mode=='3'):
listfile.write(package_name)
listfile.write('$')
    # Compare version codes numerically rather than as strings.
    if int(match['vs_code']) > int(versioncode):
print(f'\033[A{bcolors.OKPURPLE}⣿{bcolors.ENDC}')
print(f" {bcolors.INFOYELLOW}Update Availabe!\n{bcolors.ENDC}")
print(f" Version code{bcolors.OKPURPLE}(Server) : %s{bcolors.ENDC}"%(match['vs_code']))
print(f" Version code{bcolors.OKBLUE}(Installed) : %s\n{bcolors.ENDC}"%(versioncode))
continue_msg = input(f"{bcolors.OKBLUE}Do you want to install this version? {bcolors.INFOYELLOW}[Y/n]: ")
print('\n')
# Download the apk file
while continue_msg not in ["Y", "y", "", "N", "n"]:
continue_msg = input(f"{bcolors.OKBLUE}\033[AInput Error. choose {bcolors.INFOYELLOW}[Y/n]: ")
else:
if continue_msg in ("N", "n"):
print(f"{bcolors.OKBLUE}\033[AOkay, You may try again any time :)\n\n{bcolors.ENDC}")
if continue_msg in ("Y", "y", ""):
print(f"{bcolors.OKBLUE}\033[ADownload started!... {bcolors.ENDC}")
file = request.urlretrieve(match["uri"], f'{package_name}.apk')
print(f"{bcolors.OKBLUE}APK saved: {bcolors.INFOYELLOW}{os.getcwd()}/{file[0]}{bcolors.ENDC}")
print(f"\n{bcolors.OKBLUE}Install started!...{bcolors.ENDC}")
insproc()
while(insout[1]!=b'Success\n'):
insproc()
print(f"{bcolors.FAIL}ERROR : Device not connected or authorization not granted{bcolors.ENDC}")
print(f"{bcolors.INFOYELLOW}INFO : Connect the device and grant authorization for USB debugging{bcolors.ENDC}\033[A\033[A")
print(" ")
print(" \033[A\033[A")
print(f"{bcolors.OKPURPLE}{insout[0].decode('utf-8')}{bcolors.ENDC}{bcolors.OKGREEN}{insout[1].decode('utf-8')}{bcolors.ENDC}")
print(f"{bcolors.OKPURPLE}Running Post-install Cleanup{bcolors.ENDC}")
os.remove(file[0])
print(f"{bcolors.INFOYELLOW}APK Deleted!{bcolors.ENDC}")
print(f"{bcolors.OKGREEN}DONE!{bcolors.ENDC}\n\n")
    elif int(match['vs_code']) == int(versioncode):
print(f'\033[A{bcolors.OKPURPLE}⣿{bcolors.ENDC}')
print(f" {bcolors.OKGREEN}Already the latest version\n\n{bcolors.ENDC}")
else:
print(f'\033[A{bcolors.OKPURPLE}⣿{bcolors.ENDC}')
print(f" {bcolors.INFOYELLOW}Installed version higher than that on server?!!\n{bcolors.ENDC}")
print(f" Version code{bcolors.OKPURPLE}(Server) : %s{bcolors.ENDC}"%(match['vs_code']))
print(f" Version code{bcolors.OKBLUE}(Installed) : %s\n{bcolors.ENDC}\n"%(versioncode))
def urlproc():
global url
url = url_format.format(package_name, model.upper(), sdk_ver)
url = requests.get(url).text
# set url format
url_format = "https://vas.samsungapps.com/stub/stubDownload.as?appId={}&deviceId={}" \
"&mcc=425&mnc=01&csc=ILO&sdkVer={}&pd=0&systemId=1608665720954&callerId=com.sec.android.app.samsungapps" \
"&abiType=64&extuk=0191d6627f38685f"
i=0
pkgcount=0
errorcount=0
devproc()
while(model==''):
devproc()
print(f"{bcolors.FAIL}ERROR : Device not connected or authorization not granted{bcolors.ENDC}")
print(f"{bcolors.INFOYELLOW}INFO : Connect the device and grant authorization for USB debugging{bcolors.ENDC}\033[A\033[A")
print(" ")
print(" \033[A\033[A")
print("\nDevice Model detected: %s"%(model))
andproc()
print("Android SDK Version: %s\n"%(sdk_ver))
modesel()
print("\n\t--End of package list--")
sys.exit("Operation Completed Successfully")
| 38.77907 | 413 | 0.655272 |
795026a4fff1d07b5be14f3c5ab3c767272151cf | 9,140 | py | Python | bilateral_network/inference.py | siyangl/video-object-segmentation-motion-bilateral-network | b278fed4fb7f82f97e8e355e54138e4de6c14342 | ["MIT"] | 1 | 2019-03-17T01:34:22.000Z | 2019-03-17T01:34:22.000Z | bilateral_network/inference.py | siyangl/video-object-segmentation-motion-bilateral-network | b278fed4fb7f82f97e8e355e54138e4de6c14342 | ["MIT"] | null | null | null | bilateral_network/inference.py | siyangl/video-object-segmentation-motion-bilateral-network | b278fed4fb7f82f97e8e355e54138e4de6c14342 | ["MIT"] | 2 | 2019-04-29T06:42:40.000Z | 2019-07-15T01:54:39.000Z |
import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from vos import utils
from data import davis_flow_dataset
flags.DEFINE_string('dataset', 'Davis', 'Dataset used for training')
flags.DEFINE_string('split', 'val', 'Dataset split')
flags.DEFINE_integer('num_classes', 2, 'Number of classes')
flags.DEFINE_string('data_dir', './FlowExamples', 'Dir to TFExamples.')
flags.DEFINE_float('sigma', 3., 'Initial sigma for the Gaussian filter')
flags.DEFINE_integer('kernel_size', 21, 'kernel size')
flags.DEFINE_integer('loc_kernel_size', 37, 'kernel size of loc dim')
flags.DEFINE_float('weight_decay', 0.0005, 'Weight decay factor.')
flags.DEFINE_string('checkpoint', None, 'Continue training from previous checkpoint')
flags.DEFINE_string('output_dir', None, 'Output dir')
flags.DEFINE_integer('batch_size', 1, 'Batch size')
flags.DEFINE_bool('save_raw_results', False, 'Save raw results')
flags.DEFINE_integer('random_select_k_bg', 50000,
'Random select k pixels from low objectness region for data augmentation')
FLAGS = flags.FLAGS
def create_2d_gaussian_kernel(kernel_size, sigma):
half_window = (kernel_size-1)//2
h_feat = np.arange(-half_window, half_window+1)
w_feat = np.arange(-half_window, half_window+1)
hw_feat = np.array(np.meshgrid(h_feat, w_feat, indexing='ij'))
kernel = np.sum((hw_feat/sigma)**2, axis=0)
kernel = np.exp(-kernel)
kernel = kernel/np.sum(kernel)
return kernel
def create_1d_uniform_kernel(kernel_size):
kernel = np.ones(kernel_size)
kernel /= np.sum(kernel)
return kernel
def create_1d_gaussian_kernel(kernel_size, sigma):
half_window = (kernel_size - 1) // 2
h_feat = np.arange(-half_window, half_window + 1)
kernel = np.exp(-0.5*(h_feat/sigma)**2)
kernel = kernel/np.sum(kernel)
return kernel
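# Small sanity sketch (not called anywhere in this script): each kernel
# constructor above returns weights normalized to sum to 1.
def _example_kernel_sums():
    k2d = create_2d_gaussian_kernel(kernel_size=21, sigma=3.0)
    k1d = create_1d_gaussian_kernel(kernel_size=21, sigma=3.0)
    ku = create_1d_uniform_kernel(kernel_size=37)
    return np.sum(k2d), np.sum(k1d), np.sum(ku)  # each approximately 1.0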
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for op_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.op_scope([tensor], scope, 'L2Regularizer'):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
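# Hedged usage sketch (unused): l2_regularizer returns a closure; applied to a
# tensor it yields weight * sum(t**2) / 2 as a scalar term, via tf.nn.l2_loss.
def _example_l2_regularizer():
    reg = l2_regularizer(weight=0.0005)
    t = tf.constant([1.0, 2.0, 3.0])
    return reg(t)  # 0.0005 * (1 + 4 + 9) / 2 = 0.0035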
def main(_):
# Create output dir. Shapes of 4-D tensors and frames are hard-coded.
if not FLAGS.output_dir is None:
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
global_mask_dir = os.path.join(FLAGS.output_dir, 'UnaryOnly')
if not os.path.exists(global_mask_dir):
os.mkdir(global_mask_dir)
global_vis_dir = os.path.join(FLAGS.output_dir, 'UnaryVis')
if not os.path.exists(global_vis_dir):
os.mkdir(global_vis_dir)
global_bnn_dir = os.path.join(FLAGS.output_dir, 'BNNRaw')
if not os.path.exists(global_bnn_dir):
os.mkdir(global_bnn_dir)
flow_grid_size = 40
grid_size = 18
frame_shape = (480, 854)
with tf.Graph().as_default():
dataset = davis_flow_dataset.get_split(FLAGS.split, FLAGS.data_dir)
# Use the no augmentation provider.
(labels, lattice, slice_index, obj, sequence_name,
timestep) = davis_flow_dataset.provide_data(
dataset, shuffle=False, num_epochs=None, batch_size=FLAGS.batch_size)
        # Some code duplicated from the training script.
flow_init = create_1d_gaussian_kernel(FLAGS.kernel_size, FLAGS.sigma)
loc_init = create_1d_uniform_kernel(kernel_size=FLAGS.loc_kernel_size)
flow_weights_dx = tf.contrib.framework.variable('weights/dx',
shape=flow_init.shape,
initializer=tf.constant_initializer(flow_init),
regularizer=l2_regularizer(FLAGS.weight_decay),
trainable=True)
flow_weights_dy = tf.contrib.framework.variable('weights/dy',
shape=flow_init.shape,
initializer=tf.constant_initializer(flow_init),
regularizer=l2_regularizer(FLAGS.weight_decay),
trainable=True)
weights_x = tf.contrib.framework.variable('weights/x',
shape=loc_init.shape,
initializer=tf.constant_initializer(loc_init),
regularizer=l2_regularizer(FLAGS.weight_decay),
trainable=True)
weights_y = tf.contrib.framework.variable('weights/y',
shape=loc_init.shape,
initializer=tf.constant_initializer(loc_init),
regularizer=l2_regularizer(FLAGS.weight_decay),
trainable=True)
lattice = tf.expand_dims(lattice, 5)
filters_y = tf.reshape(weights_y, [-1, 1, 1, 1, 1])
filters_x = tf.reshape(weights_x, [-1, 1, 1, 1, 1])
filters_dx = tf.reshape(flow_weights_dx, [1, -1, 1, 1, 1])
filters_dy = tf.reshape(flow_weights_dy, [1, 1, -1, 1, 1])
lattice = tf.transpose(lattice, [0, 2, 1, 3, 4, 5]) # [b, x, y, dx, dy]
lattice = tf.reshape(lattice, [FLAGS.batch_size * grid_size, grid_size,
flow_grid_size, flow_grid_size, 1]) # [b*x, y, dx, dy]
filtered = tf.nn.convolution(lattice, filters_y, padding='SAME') # conv along y
filtered = tf.reshape(filtered, [FLAGS.batch_size, grid_size, grid_size,
flow_grid_size, flow_grid_size, 1]) # [b, x, y, dx, dy]
filtered = tf.transpose(filtered, [0, 2, 1, 3, 4, 5]) # [b, y, x, dx, dy]
filtered = tf.reshape(filtered, [FLAGS.batch_size * grid_size, grid_size,
flow_grid_size, flow_grid_size, 1]) # [b*y, x, dx, dy]
filtered = tf.nn.convolution(filtered, filters_x, padding='SAME') # conv along x
filtered = tf.nn.convolution(filtered, filters_dx, padding='SAME') # conv along dx
filtered = tf.nn.convolution(filtered, filters_dy, padding='SAME') # conv along dy
filtered = tf.reshape(filtered, [FLAGS.batch_size, -1])
sliced_batch_idx = tf.expand_dims(tf.range(FLAGS.batch_size, dtype=tf.int64), 1)
sliced_batch_idx = tf.reshape(tf.tile(sliced_batch_idx, [1, frame_shape[0] * frame_shape[1]]), [-1])
slice_index = tf.stack((sliced_batch_idx, tf.reshape(slice_index, [-1])), axis=1)
sliced = tf.gather_nd(filtered, slice_index)
sliced = tf.reshape(sliced, [FLAGS.batch_size, frame_shape[0], frame_shape[1]])
sliced = tf.nn.relu(sliced)
# Scale the results according to the number of bg pixels for splatting during training
# The network is almost linear (without the last relu layer), if 100k pixels have obj < 0.001,
    # and used for splatting, the overall scale will not match training, where only 50k pixels are
# splatted onto the lattice.
sliced = sliced * FLAGS.random_select_k_bg / tf.reduce_sum(lattice)
sliced = tf.reshape(sliced, [FLAGS.batch_size, frame_shape[0], frame_shape[1]])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
restorer = tf.train.Saver(tf.global_variables())
restorer.restore(sess, FLAGS.checkpoint)
tf.train.start_queue_runners(sess=sess)
for i in range(davis_flow_dataset._SPLITS_TO_SIZES[FLAGS.split]):
[sliced_out, name_out, frame, obj_out] = sess.run(
[sliced, sequence_name, timestep, obj])
sliced_out = (1 / (1 + np.exp(-sliced_out)) - 0.5) * 2
if not FLAGS.output_dir is None:
if FLAGS.save_raw_results:
output_dir = os.path.join(FLAGS.output_dir, name_out[0])
if not os.path.exists(output_dir):
os.mkdir(output_dir)
np.save(os.path.join(output_dir, '%05d.npy'%frame[0]), sliced_out)
mask_dir = os.path.join(global_mask_dir, name_out[0])
if not os.path.exists(mask_dir):
os.mkdir(mask_dir)
mask = np.squeeze(obj_out > sliced_out)
cv2.imwrite(os.path.join(mask_dir, '%05d.png'%frame[0]), mask.astype(np.uint8)*255)
vis_dir = os.path.join(global_vis_dir, name_out[0])
if not os.path.exists(vis_dir):
os.mkdir(vis_dir)
cv2.imwrite(os.path.join(vis_dir, '%05d.jpg' % frame[0]),
utils.get_heatmap(sliced_out))
bnn_dir = os.path.join(global_bnn_dir, name_out[0])
if not os.path.exists(bnn_dir):
os.mkdir(bnn_dir)
raw_mask = np.squeeze(0.5 > sliced_out)
cv2.imwrite(os.path.join(bnn_dir, '%05d.png' % frame[0]),
raw_mask.astype(np.uint8) * 255)
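# Illustration of the separable filtering used on the lattice above (sketch only;
# assumes scipy is available, which the script itself does not require). The graph
# applies four 1-D kernels one axis at a time; for separable kernels this equals a
# single convolution with their outer-product kernel, shown here in 2-D with scipy.
def _separable_filtering_demo():
    from scipy.ndimage import convolve, convolve1d
    rng = np.random.RandomState(0)
    img = rng.rand(16, 16)
    ky = create_1d_gaussian_kernel(5, 1.0)
    kx = create_1d_uniform_kernel(5)
    # Sequential 1-D passes along each axis ...
    seq = convolve1d(convolve1d(img, ky, axis=0, mode='constant'),
                     kx, axis=1, mode='constant')
    # ... match one full convolution with the outer-product kernel.
    full = convolve(img, np.outer(ky, kx), mode='constant')
    assert np.allclose(seq, full)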
if __name__ == '__main__':
app.run()
| 45.024631
| 104
| 0.633479
|
795026e41048008f28b0f0af7a1e134628864f65
| 2,316
|
py
|
Python
|
asab/task.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 23
|
2018-03-07T18:58:13.000Z
|
2022-03-29T17:11:47.000Z
|
asab/task.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 87
|
2018-04-04T19:44:13.000Z
|
2022-03-31T11:18:00.000Z
|
asab/task.py
|
TeskaLabs/asab
|
f28894b62bad192d8d30df01a8ad1b842ee2a2fb
|
[
"BSD-3-Clause"
] | 10
|
2018-04-30T16:40:25.000Z
|
2022-03-09T10:55:24.000Z
|
import logging
import asyncio
import asab
#
L = logging.getLogger(__name__)
#
class TaskService(asab.Service):
'''
Task service is for managed execution of fire-and-forget, one-off, background tasks.
The task is a coroutine, future (asyncio.ensure_future) or task (asyncio.create_task).
The task is executed in the main event loop.
The task should be a relatively short-lived (~5 seconds) asynchronous procedure.
	The result of the task is collected (and discarded) automatically
and if there was an exception, it will be printed to the log.
'''
def __init__(self, app, service_name="asab.TaskService"):
super().__init__(app, service_name)
self.NewTasks = []
self.PendingTasks = set()
self.Main = None
async def initialize(self, app):
self.start()
def start(self):
assert(self.Main is None)
self.Main = asyncio.ensure_future(self.main())
self.Main.add_done_callback(self._main_task_exited)
async def finalize(self, app):
if self.Main is not None:
task = self.Main
self.Main = None
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
except Exception as e:
L.exception("Error '{}' during task service:".format(e))
total_tasks = len(self.PendingTasks) + len(self.NewTasks)
if total_tasks > 0:
L.warning("{} pending and incompleted tasks".format(total_tasks))
def _main_task_exited(self, ctx):
if self.Main is None:
return
try:
self.Main.result()
except asyncio.CancelledError:
pass
except Exception as e:
L.exception("Error '{}' during task service:".format(e))
self.Main = None
L.warning("Main task exited unexpectedly, restarting ...")
self.start()
def schedule(self, *tasks):
'''
Schedule execution of task(s).
Tasks will be started in 1-5 seconds (not immediately).
Task can be a simple coroutine, future or task.
'''
self.NewTasks.extend(tasks)
async def main(self):
while True:
while len(self.NewTasks) > 0:
task = self.NewTasks.pop()
self.PendingTasks.add(task)
if len(self.PendingTasks) == 0:
await asyncio.sleep(5.0)
else:
done, self.PendingTasks = await asyncio.wait(self.PendingTasks, timeout=1.0)
for task in done:
try:
await task
except Exception as e:
L.exception("Error '{}' during task:".format(e))
| 22.485437
| 87
| 0.695596
|
79502845675e4de52f2ad916906bac3e5b1384e6
| 650
|
py
|
Python
|
test.py
|
UWB-ACM/ACMBot
|
7830eb2112e91ee037ca248f243e635bd65782df
|
[
"MIT"
] | null | null | null |
test.py
|
UWB-ACM/ACMBot
|
7830eb2112e91ee037ca248f243e635bd65782df
|
[
"MIT"
] | null | null | null |
test.py
|
UWB-ACM/ACMBot
|
7830eb2112e91ee037ca248f243e635bd65782df
|
[
"MIT"
] | null | null | null |
import sys
import unittest
import doctest
# all of the testable modules
# when another module needs to be incorporated into tests
# they should be added here
import cogs.base
test_modules = [
cogs.base
]
def load_tests(tests):
# add each of the test modules
for mod in test_modules:
tests.addTests(doctest.DocTestSuite(mod))
return tests
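# Illustrative only (hypothetical helper, not a cog of this bot): a module listed
# in test_modules just needs ordinary doctests in its docstrings; DocTestSuite in
# load_tests() above collects examples like the one below automatically.
def _example_add(a, b):
    """Return the sum of two numbers.
    >>> _example_add(2, 3)
    5
    """
    return a + b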
if __name__ == '__main__':
"""
runs the tests
"""
tests = unittest.TestSuite()
test = load_tests(tests)
runner = unittest.TextTestRunner()
# get the exit code and return when failed
ret = not runner.run(tests).wasSuccessful()
sys.exit(ret)
| 19.117647
| 57
| 0.681538
|
7950287654fb772cf1113739c66a1436122fbb9b
| 5,184
|
py
|
Python
|
test/brahe/test_attitude.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 14
|
2019-05-29T13:36:55.000Z
|
2022-02-11T15:26:13.000Z
|
test/brahe/test_attitude.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 1
|
2020-05-27T12:14:39.000Z
|
2020-05-27T15:51:21.000Z
|
test/brahe/test_attitude.py
|
duncaneddy/brahe
|
4a1746ef3c14211b0709de6e7e34b6f52fc0e686
|
[
"MIT"
] | 2
|
2019-10-24T05:20:54.000Z
|
2019-12-08T03:59:10.000Z
|
# Test Imports
from pytest import approx
from math import sin, cos, pi, sqrt
# Modules Under Test
from brahe.constants import *
from brahe.epoch import *
from brahe.attitude import *
def test_Rx():
deg = 45.0
rad = deg*pi/180
r = Rx(deg, use_degrees=True)
tol = 1e-8
    # `approx` belongs on the expected side of a comparison; a bare
    # `assert approx(...)` is always truthy and would never fail.
    assert r[0, 0] == approx(1.0, abs=tol)
    assert r[0, 1] == approx(0.0, abs=tol)
    assert r[0, 2] == approx(0.0, abs=tol)
    assert r[1, 0] == approx(0.0, abs=tol)
    assert r[1, 1] == approx(+cos(rad), abs=tol)
    assert r[1, 2] == approx(+sin(rad), abs=tol)
    assert r[2, 0] == approx(0.0, abs=tol)
    assert r[2, 1] == approx(-sin(rad), abs=tol)
    assert r[2, 2] == approx(+cos(rad), abs=tol)
# Test 30 Degrees
r = Rx(30, True)
assert r[0, 0] == 1.0
assert r[0, 1] == 0.0
assert r[0, 2] == 0.0
assert r[1, 0] == 0.0
assert r[1, 1] == approx(sqrt(3)/2, abs=1e-12)
assert r[1, 2] == approx(1/2, abs=1e-12)
assert r[2, 0] == 0.0
assert r[2, 1] == approx(-1/2, abs=1e-12)
assert r[2, 2] == approx(sqrt(3)/2, abs=1e-12)
# Test 45 Degrees
r = Rx(45, True)
assert r[0, 0] == 1.0
assert r[0, 1] == 0.0
assert r[0, 2] == 0.0
assert r[1, 0] == 0.0
assert r[1, 1] == approx(sqrt(2)/2, abs=1e-12)
assert r[1, 2] == approx(sqrt(2)/2, abs=1e-12)
assert r[2, 0] == 0.0
assert r[2, 1] == approx(-sqrt(2)/2, abs=1e-12)
assert r[2, 2] == approx(sqrt(2)/2, abs=1e-12)
# Test 225 Degrees
r = Rx(225, True)
assert r[0, 0] == 1.0
assert r[0, 1] == 0.0
assert r[0, 2] == 0.0
assert r[1, 0] == 0.0
assert r[1, 1] == approx(-sqrt(2)/2, abs=1e-12)
assert r[1, 2] == approx(-sqrt(2)/2, abs=1e-12)
assert r[2, 0] == 0.0
assert r[2, 1] == approx(sqrt(2)/2, abs=1e-12)
assert r[2, 2] == approx(-sqrt(2)/2, abs=1e-12)
def test_Ry():
deg = 45.0
rad = deg*pi/180
r = Ry(deg, use_degrees=True)
tol = 1e-8
    assert r[0, 0] == approx(+cos(rad), abs=tol)
    assert r[0, 1] == approx(0.0, abs=tol)
    assert r[0, 2] == approx(-sin(rad), abs=tol)
    assert r[1, 0] == approx(0.0, abs=tol)
    assert r[1, 1] == approx(1.0, abs=tol)
    assert r[1, 2] == approx(0.0, abs=tol)
    assert r[2, 0] == approx(+sin(rad), abs=tol)
    assert r[2, 1] == approx(0.0, abs=tol)
    assert r[2, 2] == approx(+cos(rad), abs=tol)
# Test 30 Degrees
r = Ry(30, True)
assert r[0, 0] == approx(sqrt(3)/2, abs=1e-12)
assert r[0, 1] == 0.0
assert r[0, 2] == approx(-1/2, abs=1e-12)
assert r[1, 0] == 0.0
assert r[1, 1] == 1.0
assert r[1, 2] == 0.0
assert r[2, 0] == approx(1/2, abs=1e-12)
assert r[2, 1] == 0.0
assert r[2, 2] == approx(sqrt(3)/2, abs=1e-12)
# Test 45 Degrees
r = Ry(45, True)
assert r[0, 0] == approx(sqrt(2)/2, abs=1e-12)
assert r[0, 1] == 0.0
assert r[0, 2] == approx(-sqrt(2)/2, abs=1e-12)
assert r[1, 0] == 0.0
assert r[1, 1] == 1.0
assert r[1, 2] == 0.0
assert r[2, 0] == approx(sqrt(2)/2, abs=1e-12)
assert r[2, 1] == 0.0
assert r[2, 2] == approx(sqrt(2)/2, abs=1e-12)
# Test 225 Degrees
r = Ry(225, True)
assert r[0, 0] == approx(-sqrt(2)/2, abs=1e-12)
assert r[0, 1] == 0.0
assert r[0, 2] == approx(sqrt(2)/2, abs=1e-12)
assert r[1, 0] == 0.0
assert r[1, 1] == 1.0
assert r[1, 2] == 0.0
assert r[2, 0] == approx(-sqrt(2)/2, abs=1e-12)
assert r[2, 1] == 0.0
assert r[2, 2] == approx(-sqrt(2)/2, abs=1e-12)
def test_Rz():
deg = 45.0
rad = deg*pi/180
r = Rz(deg, use_degrees=True)
tol = 1e-8
    assert r[0, 0] == approx(+cos(rad), abs=tol)
    assert r[0, 1] == approx(+sin(rad), abs=tol)
    assert r[0, 2] == approx(0.0, abs=tol)
    assert r[1, 0] == approx(-sin(rad), abs=tol)
    assert r[1, 1] == approx(+cos(rad), abs=tol)
    assert r[1, 2] == approx(0.0, abs=tol)
    assert r[2, 0] == approx(0.0, abs=tol)
    assert r[2, 1] == approx(0.0, abs=tol)
    assert r[2, 2] == approx(1.0, abs=tol)
# Test 30 Degrees
r = Rz(30, True)
assert r[0, 0] == approx(sqrt(3)/2, abs=1e-12)
assert r[0, 1] == approx(1/2, abs=1e-12)
assert r[0, 2] == 0.0
assert r[1, 0] == approx(-1/2, abs=1e-12)
assert r[1, 1] == approx(sqrt(3)/2, abs=1e-12)
assert r[1, 2] == 0.0
assert r[2, 0] == 0.0
assert r[2, 1] == 0.0
assert r[2, 2] == 1.0
# Test 45 Degrees
r = Rz(45, True)
assert r[0, 0] == approx(sqrt(2)/2, abs=1e-12)
assert r[0, 1] == approx(sqrt(2)/2, abs=1e-12)
assert r[0, 2] == 0.0
assert r[1, 0] == approx(-sqrt(2)/2, abs=1e-12)
assert r[1, 1] == approx(sqrt(2)/2, abs=1e-12)
assert r[1, 2] == 0.0
assert r[2, 0] == 0.0
assert r[2, 1] == 0.0
assert r[2, 2] == 1.0
# Test 225 Degrees
r = Rz(225, True)
assert r[0, 0] == approx(-sqrt(2)/2, abs=1e-12)
assert r[0, 1] == approx(-sqrt(2)/2, abs=1e-12)
assert r[0, 2] == 0.0
assert r[1, 0] == approx(sqrt(2)/2, abs=1e-12)
assert r[1, 1] == approx(-sqrt(2)/2, abs=1e-12)
assert r[1, 2] == 0.0
assert r[2, 0] == 0.0
assert r[2, 1] == 0.0
assert r[2, 2] == 1.0
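# Supplementary property check (sketch; assumes Rx/Ry/Rz return 3x3 array-like
# matrices as exercised above and that numpy is available). Elementary rotation
# matrices are orthogonal with determinant +1, so composing a rotation with its
# inverse angle recovers the identity.
def test_rotation_properties():
    import numpy as np
    for R in (Rx, Ry, Rz):
        r_fwd = np.asarray(R(30, True))
        r_back = np.asarray(R(-30, True))
        assert np.allclose(r_fwd.dot(r_back), np.eye(3), atol=1e-12)
        assert np.linalg.det(r_fwd) == approx(1.0, abs=1e-12)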
| 26.181818
| 51
| 0.512346
|
79502987d5104aac630635b82708c07d0dfe2911
| 151
|
py
|
Python
|
sqlite/03.py
|
mallimuondu/python-practice
|
64fce60a646032152db8b35e20df06b2edc349ae
|
[
"MIT"
] | null | null | null |
sqlite/03.py
|
mallimuondu/python-practice
|
64fce60a646032152db8b35e20df06b2edc349ae
|
[
"MIT"
] | null | null | null |
sqlite/03.py
|
mallimuondu/python-practice
|
64fce60a646032152db8b35e20df06b2edc349ae
|
[
"MIT"
] | null | null | null |
def update():
    # Prompt repeatedly until a purely alphabetic name is entered.
    while True:
        name = input("enter your name: ")
        if not name.isalpha():
            continue
        break
update()
| 18.875
| 42
| 0.509934
|
7950298cc89dfd048251aaa9429cb98d8ad3ed33
| 9,120
|
py
|
Python
|
script/show_result.py
|
ARQ-CRISP/bopt_grasp_quality
|
219372e6644005651e166ed3091c5410385c7d30
|
[
"MIT"
] | null | null | null |
script/show_result.py
|
ARQ-CRISP/bopt_grasp_quality
|
219372e6644005651e166ed3091c5410385c7d30
|
[
"MIT"
] | null | null | null |
script/show_result.py
|
ARQ-CRISP/bopt_grasp_quality
|
219372e6644005651e166ed3091c5410385c7d30
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import skopt
import argparse
import matplotlib.pyplot as plt
import matplotlib.font_manager
import numpy as np
from rospkg.rospack import RosPack
from skopt.acquisition import gaussian_ei
# from skopt.plots import plot_convergence
matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
# plt.rcParams["figure.figsize"] = (8, 14)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.sans-serif'] = ['Helvetica']
pack_name = 'bopt_grasp_quality'
pkg_path = RosPack().get_path(pack_name)
def plot_result1D(result, n_samples=400):
conf95 = 1.96
result = result
fig = plt.figure()
plt.ion()
plt.title('Estimated Metric')
history_x = np.array(result.x_iters)
history_y = np.array(result.func_vals)
x = np.linspace(
result.space.bounds[0][0], result.space.bounds[0][1], n_samples).reshape(-1, 1)
x_gp = result.space.transform(x.tolist())
gp = result.models[-1]
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x*100, -y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(
np.concatenate([x, x[::-1]])*100,
np.concatenate(
[-y_pred - conf95 * sigma, (-y_pred + conf95 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(history_x * 100, -history_y,
"r.", markersize=8, label="Observations")
plt.plot(np.array(result.x)*100, -result.fun, '.y', markersize=10, label='best value')
plt.xlabel('Hand position (cm)')
plt.ylabel('Grasp Metric')
plt.legend()
plt.draw()
plt.grid()
plt.pause(0.1)
def plot_history1D(res, iters, n_samples=400):
x = np.linspace(
res.space.bounds[0][0], res.space.bounds[0][1], n_samples).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
# fx = np.array([f(x_i, noise_level=0.0) for x_i in x])
conf95 = 1.96
r_start = len(res.x_iters) - len(res.models)
# result = result
max_iters = len(res.x_iters) + 1
illegal_iters = filter(lambda x: x < 0 or x >= len(res.models), iters)
iters = filter(lambda x: x >= 0 and x < len(res.models), iters)
# print(2.8 * len(iters))
fig = plt.figure(figsize=(8, 2.8 * len(iters)))
plt.suptitle('Iteration History')
plt.ion()
print('WARNING: iterations {} not existing'.format(illegal_iters))
for idx, n_iter in enumerate(iters):
gp = res.models[n_iter]
plt.subplot(len(iters), 2, 2*idx+1)
plt.title('Iteration {:d}'.format(n_iter))
curr_x_iters = res.x_iters[:min(max_iters, r_start + n_iter+1)]
curr_func_vals = res.func_vals[:min(max_iters, r_start + n_iter+1)]
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x * 100, -y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate(np.array([x, x[::-1]]) * 100),
-np.concatenate([y_pred - conf95 * sigma,
(y_pred + conf95 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(np.array(curr_x_iters) * 100, -np.array(curr_func_vals),
"r.", markersize=8, label="Observations")
# Adjust plot layout
plt.grid()
if n_iter + 1 == len(res.models):
plt.plot(np.array(res.x) * 100, -res.fun, 'Xc', markersize=14, label='Best value')
if idx == len(iters)-1:
plt.legend(bbox_to_anchor=(0.5, -0.30), loc='upper center', ncol=2)
# plt.legend(loc="best", prop={'size': 6*4/len(iters)}, numpoints=1)
plt.xlabel('Hand Position (cm)')
if idx + 1 != len(iters):
plt.tick_params(axis='x', which='both', bottom='off',
top='off', labelbottom='off')
# Plot EI(x)
plt.subplot(len(iters), 2, 2*idx+2)
acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
plt.plot(x*100, acq, "b", label="EI(x)")
plt.fill_between(x.ravel() *100, -2.0, acq.ravel(), alpha=0.3, color='blue')
if r_start + n_iter + 1 < max_iters:
next_x = (res.x_iters + [res.x])[min(max_iters, r_start + n_iter + 1)]
next_acq = gaussian_ei(res.space.transform([next_x]), gp,
y_opt=np.min(curr_func_vals))
plt.plot(np.array(next_x) * 100, next_acq, "bo", markersize=6,
label="Next query point")
# Adjust plot layout
plt.ylim(0, 1.1)
plt.grid()
if idx == len(iters) - 1:
plt.legend(bbox_to_anchor=(0.5, -0.30), loc='upper center', ncol=2)
plt.xlabel('Hand Position (cm)')
# plt.legend(loc="best", prop={'size': 6*4/len(iters)}, numpoints=1)
if idx + 1 != len(iters):
plt.tick_params(axis='x', which='both', bottom='off',
top='off', labelbottom='off')
plt.show()
def plot_convergence1D(*args, **kwargs):
from scipy.optimize import OptimizeResult
"""Plot one or several convergence traces.
Parameters
----------
args[i] : `OptimizeResult`, list of `OptimizeResult`, or tuple
The result(s) for which to plot the convergence trace.
- if `OptimizeResult`, then draw the corresponding single trace;
- if list of `OptimizeResult`, then draw the corresponding convergence
traces in transparency, along with the average convergence trace;
- if tuple, then `args[i][0]` should be a string label and `args[i][1]`
an `OptimizeResult` or a list of `OptimizeResult`.
ax : `Axes`, optional
The matplotlib axes on which to draw the plot, or `None` to create
a new one.
true_minimum : float, optional
The true minimum value of the function, if known.
yscale : None or string, optional
The scale for the y-axis.
Returns
-------
ax : `Axes`
The matplotlib axes.
"""
fig = plt.figure()
plt.ion()
# <3 legacy python
ax = kwargs.get("ax", None)
true_minimum = kwargs.get("true_minimum", None)
yscale = kwargs.get("yscale", None)
if ax is None:
ax = plt.gca()
ax.set_title("Convergence plot")
ax.set_xlabel("Number of calls $n$")
ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
ax.grid()
if yscale is not None:
ax.set_yscale(yscale)
colors = plt.cm.viridis(np.linspace(0.25, 1.0, len(args)))
for results, color in zip(args, colors):
if isinstance(results, tuple):
name, results = results
else:
name = None
if isinstance(results, OptimizeResult):
n_calls = len(results.x_iters)
mins = [np.min(results.func_vals[:i])
for i in range(1, n_calls + 1)]
ax.plot(range(1, n_calls + 1), mins, c=color,
marker=".", markersize=12, lw=2, label=name)
elif isinstance(results, list):
n_calls = len(results[0].x_iters)
iterations = range(1, n_calls + 1)
mins = [[np.min(r.func_vals[:i]) for i in iterations]
for r in results]
for m in mins:
ax.plot(iterations, m, c=color, alpha=0.2)
ax.plot(iterations, np.mean(mins, axis=0), c=color,
marker=".", markersize=12, lw=2, label=name)
if true_minimum:
ax.axhline(true_minimum, linestyle="--",
color="r", lw=1,
label="True minimum")
if true_minimum or name:
ax.legend(loc="best")
plt.draw()
plt.pause(.1)
return ax
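# Usage sketch (illustration; `res_a`/`res_b` stand for loaded skopt results such
# as the one read in the __main__ block below): a single run can be plotted
# directly, and several labelled runs can be compared on the same axes.
#
#   plot_convergence1D(res_a)
#   plot_convergence1D(('with prior', res_a), ('without prior', res_b))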
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog='show_result', description="Plots the result of 1D BayesOpt Experiments available types ['history', 'result', 'convergence']")
parser.add_argument('-f', action='store', dest='file', default='BayesOpt.pkl')
parser.add_argument('-t', action='store', dest='type', type=str, default='result')
parser.add_argument('-i', action='store', dest='iters', type=int, default=[0, 1, 2, 3, 4], nargs='+')
parse_res = parser.parse_args()
fun_type = {
'history': lambda res: plot_history1D(res, parse_res.iters),
'result': lambda res: plot_result1D(res),
'convergence': lambda res: plot_convergence1D(res)
}
splitted = parse_res.file.split('/')
if len(splitted) == 1:
saved_model = pkg_path + '/etc/' + parse_res.file
else:
saved_model = parse_res.file
print('Loading file: {}'.format(saved_model))
res = skopt.load(saved_model)
if parse_res.type in fun_type.keys():
print('Plot type: {}'.format(parse_res.type))
fun_type[parse_res.type](res)
else:
print('[ERROR] requested plot does not exist!')
print('Minima found in: {:.3f}, {:.3f}'.format(res.x[0], res.fun))
end = raw_input('Press key to terminate >> ')
| 36.334661
| 139
| 0.580702
|
795029cad8d440ed2f3d573c08168e34d0a50bb6
| 18,583
|
py
|
Python
|
qbert-agent-ale.py
|
Fenrir12/rl-qbert
|
51ae397846c730169a9b6b76fe12d319fc5a84ff
|
[
"MIT"
] | null | null | null |
qbert-agent-ale.py
|
Fenrir12/rl-qbert
|
51ae397846c730169a9b6b76fe12d319fc5a84ff
|
[
"MIT"
] | null | null | null |
qbert-agent-ale.py
|
Fenrir12/rl-qbert
|
51ae397846c730169a9b6b76fe12d319fc5a84ff
|
[
"MIT"
] | null | null | null |
from ale_python_interface import ALEInterface
import gym
import numpy as np
import random as rd
import matplotlib.pyplot as plt
import sys
import time as t
from PIL import Image
WEIGHTS1 = [1.3826337386217185, 23.894746079161084, 8.801830487930047, 11.254706535442095, 0.5956519333495852, 8.779244143679769, 1.2142990476462545, 1.5014086491630236, 1.2340120376539887, 1.2536234329023972, 1.1109156466921406, -1.3385189077421555, 0.4091773262075074, 1.4591866846765025, 1.7628712271103488, 2.177067408798442, 0.38667275775135457, 1.249181200223059, 2.208181286057919, 1.2595264191424724, 1.690644813808155, 0.21153815086304964, 0.9419314708311681, 1.085455920333917, 1.372615691498354, 0.9592344002780562, 1.2591047488657916, 13.684806533175662, 13.138060227438961, 11.44460497846998, 16.383418276389474]
# Atrocious function to generate future locations depending on action
# WARNING !! May hurt your eyes
def gen_future_locs(locs):
fut_locs = {}
for n in range(21):
fut_locs[str(n)] = 6 * [0]
    for idx, loc in enumerate(locs):
# All future locs depending on position
if idx == 0:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 'NOPE'
fut_locs[str(idx)][3] = 2
fut_locs[str(idx)][4] = 'NOPE'
fut_locs[str(idx)][5] = 1
elif idx == 1:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 0
fut_locs[str(idx)][3] = 4
fut_locs[str(idx)][4] = 'NOPE'
fut_locs[str(idx)][5] = 3
elif idx == 2:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 'NOPE'
fut_locs[str(idx)][3] = 5
fut_locs[str(idx)][4] = 0
fut_locs[str(idx)][5] = 4
elif idx == 3:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 1
fut_locs[str(idx)][3] = 7
fut_locs[str(idx)][4] = 'NOPE'
fut_locs[str(idx)][5] = 6
elif idx == 4:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 2
fut_locs[str(idx)][3] = 8
fut_locs[str(idx)][4] = 1
fut_locs[str(idx)][5] = 7
elif idx == 5:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 'NOPE'
fut_locs[str(idx)][3] = 9
fut_locs[str(idx)][4] = 2
fut_locs[str(idx)][5] = 8
elif idx == 6:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 3
fut_locs[str(idx)][3] = 11
fut_locs[str(idx)][4] = 'NOPE'
fut_locs[str(idx)][5] = 10
elif idx == 7:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 4
fut_locs[str(idx)][3] = 12
fut_locs[str(idx)][4] = 3
fut_locs[str(idx)][5] = 11
elif idx == 8:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 5
fut_locs[str(idx)][3] = 13
fut_locs[str(idx)][4] = 4
fut_locs[str(idx)][5] = 12
elif idx == 9:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 'NOPE'
fut_locs[str(idx)][3] = 14
fut_locs[str(idx)][4] = 5
fut_locs[str(idx)][5] = 13
elif idx == 10:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 6
fut_locs[str(idx)][3] = 16
fut_locs[str(idx)][4] = 21
fut_locs[str(idx)][5] = 15
elif idx == 11:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 7
fut_locs[str(idx)][3] = 17
fut_locs[str(idx)][4] = 6
fut_locs[str(idx)][5] = 16
elif idx == 12:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 8
fut_locs[str(idx)][3] = 18
fut_locs[str(idx)][4] = 7
fut_locs[str(idx)][5] = 17
elif idx == 13:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 9
fut_locs[str(idx)][3] = 19
fut_locs[str(idx)][4] = 8
fut_locs[str(idx)][5] = 18
elif idx == 14:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 22
fut_locs[str(idx)][3] = 20
fut_locs[str(idx)][4] = 9
fut_locs[str(idx)][5] = 19
elif idx == 15:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 10
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 'NOPE'
fut_locs[str(idx)][5] = 'NOPE'
elif idx == 16:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 11
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 10
fut_locs[str(idx)][5] = 'NOPE'
elif idx == 17:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 12
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 11
fut_locs[str(idx)][5] = 'NOPE'
elif idx == 18:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 13
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 12
fut_locs[str(idx)][5] = 'NOPE'
elif idx == 19:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 14
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 13
fut_locs[str(idx)][5] = 'NOPE'
elif idx == 20:
fut_locs[str(idx)][0] = idx
fut_locs[str(idx)][1] = idx
fut_locs[str(idx)][2] = 'NOPE'
fut_locs[str(idx)][3] = 'NOPE'
fut_locs[str(idx)][4] = 14
fut_locs[str(idx)][5] = 'NOPE'
return fut_locs
# return True if pixel is yellow
def is_yellow(px):
if YEL_PIX in px:
return True
return False
# return True if pixel is orange
def is_orange(px):
if [181, 83, 40] in px:
return True
return False
# return True if pixel is green
def is_green(px):
if [36, 146, 85] in px:
return True
return False
# return True if pixel is purple
def is_purple(px):
if [146, 70, 192] in px:
return True
return False
# return True if pixel is blue
def is_blue(px):
if BLUE_PIX in px:
return True
return False
# Gets position of snake on the 0 to 21 boxes
def snake_there(obs):
pos = CHAR_LOCS
# Look for bert location
for idx, pix in enumerate(pos):
if is_purple(obs[pix[1]][pix[0]]):
return idx
return S_LOC
# Gets position of green blob on the 0 to 21 boxes
def blob_there(obs):
pos = CHAR_LOCS
# Look for bert location
for idx, pix in enumerate(pos):
if is_green(obs[pix[1]][pix[0]]):
return idx
return G_LOC
# Gets position of Bert on the 0 to 21 boxes
def bert_there(obs):
pos = CHAR_LOCS
# Look for bert location
for idx, pix in enumerate(pos):
if is_orange(obs[pix[1] - 2][pix[0]]) or is_orange(obs[pix[1] - 2][pix[0] + 1]) or is_orange(
obs[pix[1] - 4][pix[0]]):
return idx
return B_LOC
# FEATURE : Get position feature of bert.
# In a vector of 23, the position of Bert is given value 1
def pos_features(obs, pos):
features = 23 * [0.0]
if pos == 'NOPE':
return features
features[pos] = 1
return features
# FEATURE : Returns 0 if Bert's next position sends him into the void, 1 otherwise
def doom_feature(obs, pos):
if pos == 'NOPE':
return 0
return 1
# FEATURE : Returns 1 if Bert is on an unconquered box
def color_features(obs, pos):
features = 21 * [0.0]
for idx, pix in enumerate(BOX_LOCS[0:21]):
if is_yellow(obs[pix[1]][pix[0]]):
features[idx] = 1
if features[idx] != 1 and idx == pos:
return 1
return 0
# Returns the number of conquered boxes of the screen
def num_col_boxes(obs):
features = 21 * [0.0]
for idx, pix in enumerate(BOX_LOCS[0:21]):
if is_yellow(obs[pix[1]][pix[0]]):
features[idx] = 1
return sum(features)
# FEATURE : Returns 1 if bert gets closer to the green character
def blob_feature(bert_fut_pos):
if bert_fut_pos == 'NOPE':
bert_pos = CHAR_LOCS[B_LOC]
else:
bert_pos = CHAR_LOCS[bert_fut_pos]
blob_pos = CHAR_LOCS[G_LOC]
dis = np.sqrt((bert_pos[1] - blob_pos[1]) ** 2 + (bert_pos[0] - blob_pos[0]) ** 2)
pres_dis = np.sqrt((CHAR_LOCS[B_LOC][1] - blob_pos[1]) ** 2 + (CHAR_LOCS[B_LOC][0] - blob_pos[0]) ** 2)
if dis < pres_dis:
return 1
return 0
# FEATURE : Returns 0 if the related disk is not present in the screen
def disk_features(obs):
disk1 = 1
disk2 = 1
if [0, 0, 0] in obs[BOX_LOCS[21][1]][BOX_LOCS[21][0]]:
disk1 = 0
if [0, 0, 0] in obs[BOX_LOCS[22][1]][BOX_LOCS[22][0]]:
disk2 = 0
return [disk1, disk2]
# FEATURE : Returns 1 if bert gets closer to the snake
def snakedis_feature(bert_fut_pos):
if bert_fut_pos == 'NOPE':
bert_pos = CHAR_LOCS[B_LOC]
else:
bert_pos = CHAR_LOCS[bert_fut_pos]
snake_pos = CHAR_LOCS[S_LOC]
dis = np.sqrt((bert_pos[1] - snake_pos[1]) ** 2 + (bert_pos[0] - snake_pos[0]) ** 2)
pres_dis = np.sqrt((CHAR_LOCS[B_LOC][1] - snake_pos[1]) ** 2 + (CHAR_LOCS[B_LOC][0] - snake_pos[0]) ** 2)
if dis > pres_dis:
return 0
return 1
# FEATURE : Value 1 if Bert gets closer to an unconquered box
def bluedistance_feature(obs, bert_fut_pos):
if bert_fut_pos == 'NOPE':
bert_pos = CHAR_LOCS[B_LOC]
else:
bert_pos = CHAR_LOCS[bert_fut_pos]
distances = []
pres_distances = []
for idx, pix in enumerate(BOX_LOCS[0:20]):
if is_blue(obs[pix[1]][pix[0]]):
distances.append(
np.sqrt((bert_pos[1] - BOX_LOCS[idx][1]) ** 2 + (bert_pos[0] - BOX_LOCS[idx][0]) ** 2) / 158)
if is_blue(obs[pix[1]][pix[0]]):
pres_distances.append(
np.sqrt((CHAR_LOCS[B_LOC][1] - BOX_LOCS[idx][1]) ** 2 + (
CHAR_LOCS[B_LOC][0] - BOX_LOCS[idx][0]) ** 2) / 158)
if len(distances) == 0:
return 0
mindis = min(distances)
pres_dis = min(pres_distances)
if mindis < pres_dis:
return 1
return 0
# Returns features of possible future states
def predict(obs):
features = []
# Get future position
fut_pos = FUTURE_POS[str(B_LOC)]
for action, pos in enumerate(fut_pos[2:6]):
snakedis = snakedis_feature(pos)
colorsum = color_features(obs, pos)
bluedis = bluedistance_feature(obs, pos)
lives = LIVES / 4
disks = disk_features(obs)
blobdis = blob_feature(pos)
doom = doom_feature(screen, pos)
pos_feat = pos_features(obs, pos)
features.append([snakedis] + [colorsum] + [bluedis] + [lives] + disks + [blobdis] + [doom] + pos_feat)#[snakedis, colorsum, bluedis, lives])# + disks + [blobdis] + [doom] + pos_feat)
return features
# Returns Q values of features with or without optimistic prior
def get_Q(weights, features):
action = 0
Qi = 0
Q = []
for feature in features:
for id in range(NUM_FEATURES):
Qi += feature[id] * weights[id]
if [action, features[action]] in N:
pos = N.index([action, features[action]])
n = Na[pos]
else:
n = 1
action += 1
if n >= Ne:
Q.append(Qi)
else:
if USE_OPTIMISTIC_PRIOR == True:
Q.append(Qi + 1/n*100)
else:
Q.append(Qi)
Qi = 0
return Q
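# Worked illustration of the optimistic prior used in get_Q (sketch; the helper
# below is a standalone reference, not called by the agent). Until a state-action
# pair has been tried Ne (=5) times it receives an exploration bonus of 100/n:
#   n = 1 -> Q + 100.0,  n = 2 -> Q + 50.0,  n = 4 -> Q + 25.0,  n >= 5 -> Q.
# Note that `1/n*100` above assumes Python 3 division semantics; under Python 2
# integer division the bonus collapses to 0 for every n > 1.
def _optimistic_q(q, n, ne=5, bonus_scale=100.0):
    return q if n >= ne else q + bonus_scale / float(n)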
BOX_LOCS = [[78, 38], [65, 66], [94, 66], [54, 95], [78, 95], [106, 95], [42, 124], [66, 124], [94, 124], [118, 124],
[30, 153], [54, 153], [78, 153], [106, 153], [130, 153], [18, 182], [42, 182], [66, 182], [94, 182],
[118, 182], [142, 182], [12, 139], [146, 139]]
CHAR_LOCS = [[77, 28], [65, 56], [93, 56], [53, 85], [77, 85], [105, 85], [41, 114], [65, 114], [93, 114], [117, 114],
[29, 143], [53, 143], [77, 143], [105, 143], [129, 143], [17, 172], [41, 172], [65, 172], [93, 172],
[117, 172], [141, 172], [12, 129], [146, 129]]
# Initialize positions of character and game parameters
B_LOC = 0
S_LOC = 20
G_LOC = 20
CLEARED = 0
STAGE = 1
# Initialize vectors for optimistic priors calculation
Na = []
N = []
Ne = 5
#Gets user defined parameters
SEED = int(sys.argv[1])
USE_OPTIMISTIC_PRIOR = True if sys.argv[2] == 'use_prior' else False
SEE_SCREEN = True if sys.argv[3] == 'set_screen' else False
# Generate future positions of Bert depending on current position and action
FUTURE_POS = gen_future_locs(BOX_LOCS)
# Learning hyperparameters
episodes = 400 # how many episodes to wait before moving the weights
max_time = 10000
gamma = 0.99 # discount factor for reward
lr = 1e-4
NUM_FEATURES = 31
weights = [rd.random() for _ in range(NUM_FEATURES)]
e = 0.15 if USE_OPTIMISTIC_PRIOR == False else 0.00
# Initialize learning environment
ale = ALEInterface()
ale.setBool('sound', False)
ale.setBool('display_screen', SEE_SCREEN)
ale.setInt('frame_skip', 1)
ale.setInt('random_seed', SEED)
rd.seed(SEED)
ale.loadROM("qbert.bin")
ELPASED_FRAME = 0
# Possible positions of Bert in the RAM right before taking any action
MEM_POS = [[69, 77], [92, 77], [120, 65], [97, 65], [147, 53], [124, 53],
[152, 41], [175, 41], [180, 29], [203, 29], [231, 16], [231, 41],
[175, 65], [180, 53], [203, 53], [147, 77], [120, 93], [152, 65],
[231, 65], [175, 93], [97, 93], [180, 77], [231, 93], [180, 105],
[147, 105], [203, 77], [175, 77], [175, 117], [231, 117], [203, 129],
[203, 105], [180, 129], [231, 141], [152, 117], [124, 77], [124, 105],
[152, 93]]
learning = []
# Limits action set to UP RIGHT LEFT DOWN actions of ALE environment
actions = range(2, 6)
# Starts the learning episodes
for episode in range(episodes):
total_reward = 0
sup_reward = 0
action = 0
rewards = []
ram = ale.getRAM()
Q = 0
last_action = 0
last_Q = 0
last_features = NUM_FEATURES * [rd.random()]
BLUE_PIX = [45, 87, 176]
YEL_PIX = [210, 210, 64]
# Starts iterations of episode
for time in range(max_time):
# Get bert pos in RAM
B_POS = [ram[33], ram[43]]
# Get number of lives remaining
LIVES = ale.lives()
# last_ram = ram
ram = ale.getRAM()
screen = ale.getScreenRGB()
# Updates position of characters
S_LOC = snake_there(screen)
B_LOC = bert_there(screen)
G_LOC = blob_there(screen)
# Bert ready to take new action at permitted position
# and frame 0 of action taking
if ram[0] == 0 and B_POS in MEM_POS and CLEARED == 0:
features = predict(screen)
# e-greedy exploration. Action is updated only at right frames.
if rd.random() < e:
action = rd.choice(actions) - 2
Qs = get_Q(weights, features)
Q = Qs[action]
else:
# policy max Q action
Qs = get_Q(weights, features)
Q = max(Qs)
action = Qs.index(Q)
# Update optimistic prior if used
if [action, features[action]] in N:
pos = N.index([action, features[action]])
Na[pos] += 1
else:
N.append([action, features[action]])
Na.append(1)
# Take action
ale.act(action + 2)
# Gets last meaningful reward in stack of frames
reward = max(rewards)
if B_LOC == S_LOC or FUTURE_POS[str(B_LOC)] == None:
sup_reward = -50
else:
sup_reward = 0
for id in range(len(weights)):
update = reward + sup_reward + gamma * Q - last_Q
weights[id] = weights[id] + lr * update * last_features[id]
# Update state, Q and action and resets rewards vector
last_action = action
last_features = features[last_action]
last_Q = Q
total_reward += reward
rewards = []
else:
# Stack rewards of precedent frames to capture reward associated to right action
rewards.append(ale.act(0))
# Sets the stage as cleared if all boxes are conquered
if num_col_boxes(screen) == 21 and CLEARED == 0:
CLEARED = 1
# Updates color check of is_yellow and is_blue functions of blocks for new stage
if CLEARED == 1 and B_LOC == 0:
STAGE += 1
# Fill with color codes of boxes on each level
if STAGE == 2:
BLUE_PIX = [210, 210, 64]
YEL_PIX = [45, 87, 176]
elif STAGE == 3:
BLUE_PIX = [182, 182, 170]
YEL_PIX = [109, 109, 85]
CLEARED = 0
# Reset game and start new episode if bert is game over
if ale.game_over():
learning.append(total_reward)
plt.xlabel('Episodes (n)')
plt.ylabel('Total reward of episode')
plt.plot(range(0, len(learning)), learning)
plt.pause(0.01)
STAGE = 1
BLUE_PIX = [45, 87, 176]
YEL_PIX = [210, 210, 64]
print("Episode {}:".format(episode))
print(" completed in {} steps".format(time + 1))
print(" total_reward was {}".format(total_reward))
print("Weights are " + str(weights))
ale.reset_game()
break
plt.show()
print('success')
| 34.864916
| 625
| 0.542162
|
79502ab042209e6de1ab0de209f2100a78ed64da
| 8,080
|
py
|
Python
|
src/Laplacian.py
|
tueboesen/Active-Learning
|
a924355e58dbe964b063d1cad08cc47dfcf2530b
|
[
"MIT"
] | null | null | null |
src/Laplacian.py
|
tueboesen/Active-Learning
|
a924355e58dbe964b063d1cad08cc47dfcf2530b
|
[
"MIT"
] | null | null | null |
src/Laplacian.py
|
tueboesen/Active-Learning
|
a924355e58dbe964b063d1cad08cc47dfcf2530b
|
[
"MIT"
] | null | null | null |
import time
import hnswlib
import numpy as np
import torch
from scipy.sparse import coo_matrix, csr_matrix
def ANN_hnsw(x, k=10, euclidian_metric=False, union=True, eff=None,cutoff=False):
"""
Calculates the approximate nearest neighbours using the Hierarchical Navigable Small World Graph for fast ANN search. see: https://github.com/nmslib/hnswlib
:param x: 2D numpy array with the first dimension being different data points, and the second the features of each point.
:param k: Number of neighbours to compute
    :param euclidian_metric: Metric used for the ANN search. Pass 'l2' (or True) for the euclidean metric, anything else (e.g. 'cosine' or False) for the cosine metric.
:param union: The adjacency matrix will be made symmetrical, this determines whether to include the connections that only go one way or remove them. If union is True, then they are included.
:param eff: determines how accurate the ANNs are built, see https://github.com/nmslib/hnswlib for details.
:param cutoff: Includes a cutoff distance, such that any connection which is smaller than the cutoff is removed. If True, the cutoff is automatically calculated, if False, no cutoff is used, if a number, it is used as the cutoff threshold. Note that the cutoff has a safety built in that makes sure each data point has at least one neighbour to minimize the risk of getting a disjointed graph.
:return: Symmetric adjacency matrix, mean distance of all connections (including the self connections)
"""
nsamples = len(x)
dim = len(x[0])
# Generating sample data
data = x
data_labels = np.arange(nsamples)
if eff is None:
eff = nsamples
# Declaring index
    # Accept either the boolean form or the 'l2'/'cosine' strings passed by compute_laplacian.
    if euclidian_metric in (True, 'l2'):
        p = hnswlib.Index(space='l2', dim=dim)  # possible options are l2, cosine or ip
    else:
        p = hnswlib.Index(space='cosine', dim=dim)  # possible options are l2, cosine or ip
# Initing index - the maximum number of elements should be known beforehand
p.init_index(max_elements=nsamples, ef_construction=eff, M=200)
# Element insertion (can be called several times):
p.add_items(data, data_labels)
# Controlling the recall by setting ef:
p.set_ef(eff) # ef should always be > k
# Query dataset, k - number of closest elements (returns 2 numpy arrays)
labels, distances = p.knn_query(data, k=k)
t2 = time.time()
if cutoff:
if type(cutoff) is bool: # Automatically determine the threshold
dd_mean = np.mean(distances)
dd_var = np.var(distances)
dd_std = np.sqrt(dd_var)
threshold = dd_mean+dd_std
else:
threshold = cutoff
useable = distances < threshold
useable[:,1] = True # This should hopefully prevent any element from being completely disconnected from the rest (however it might just make two remote elements join together apart from the rest)
else:
useable = distances == distances
Js = []
Is = []
for i,(subnn,useable_row) in enumerate(zip(labels,useable)):
for (itemnn,useable_element) in zip(subnn,useable_row):
if useable_element:
Js.append(itemnn)
Is.append(i)
Vs = np.ones(len(Js),dtype=np.int64)
A = csr_matrix((Vs, (Is,Js)),shape=(nsamples,nsamples))
A.setdiag(0)
if union:
A = (A + A.T).sign()
else:
A = A.sign()
dif = (A-A.T)
idx = dif>0
A[idx] = 0
A.eliminate_zeros()
return A, np.mean(distances)
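# Minimal usage sketch for ANN_hnsw (illustration; the random data and parameter
# values below are arbitrary). The returned adjacency matrix is symmetric with an
# empty diagonal, matching the union/intersection handling above.
def _ann_hnsw_demo():
    rng = np.random.RandomState(0)
    x = rng.rand(200, 8)
    A, mean_dist = ANN_hnsw(x, k=5, euclidian_metric=True, union=True)
    assert (abs(A - A.T) > 0).nnz == 0  # symmetric
    assert A.diagonal().sum() == 0      # no self connections
    return A, mean_dist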
def compute_laplacian(features,metric='l2',knn=9,union=True,cutoff=False):
"""
Computes a knn Graph-Laplacian based on the features given.
    Note that there is room for improvement here: the graph Laplacian could be built directly on the (approximate) distances found by the ANN search, which would inherently ensure that the ANNs match the metric used in the graph Laplacian and would also be faster.
:param features: Features the graph laplacian will be built on. These can either be given as a torch tensor or numpy array. The first dimension should contain the number of samples, all other dimensions will be flattened.
    :param metric: The metric to use when computing approximate nearest neighbours. Possible options are 'l2' or 'cosine'.
:param knn: number of nearest neighbours to compute
:param union: The adjacency matrix will be made symmetrical, this determines whether to include the connections that only go one way or remove them. If union is True, then they are included.
:param cutoff: Includes a cutoff distance, such that any connection which is smaller than the cutoff is removed. If True, the cutoff is automatically calculated, if False, no cutoff is used, if a number, it is used as the cutoff threshold. Note that the cutoff has a safety built in that makes sure each data point has at least one neighbour to minimize the risk of getting a disjointed graph.
:return: Graph Laplacian, Adjacency matrix
"""
t1 = time.time()
if isinstance(features, torch.Tensor):
features = features.cpu().numpy()
features = features.reshape(features.shape[0], -1)
A, dd = ANN_hnsw(features, euclidian_metric=metric, union=union, k=knn, cutoff=cutoff)
t2 = time.time()
if metric == 'l2':
L, _ = Laplacian_Euclidian(features, A, dd)
elif metric == 'cosine':
L, _ = Laplacian_angular(features, A)
else:
raise ValueError('{} is not an implemented metric'.format(metric))
t3 = time.time()
print('ANN = {}'.format(t2-t1))
print('L = {}'.format(t3-t2))
return L,A
def Laplacian_Euclidian(X, A, sigma, dt=None):
"""
    Computes the Graph Laplacian L = D - W, where W_ij = A_ij * exp(-||X_i - X_j||_2^2 / sigma) and D is the diagonal degree matrix.
:param X: 2D numpy array with the first dimension being different data points, and the second the features of each point.
:param A: Adjacency matrix built with the same metric.
:param sigma: characteristic distance
:param dt: datatype the returned Laplacian should have, if not used, it will default to whatever the datatype of X is.
:return: Graph Laplacian, Normalized symmetric Graph Laplacian
"""
if dt is None:
dt = X.dtype
A = A.tocoo()
n,_=A.shape
I = A.row
J = A.col
tmp = np.sum((X[I] - X[J]) ** 2, axis=1)
V = np.exp(-tmp / (sigma))
W = coo_matrix((V, (I, J)), shape=(n, n))
D = coo_matrix((n, n), dtype=dt)
coo_matrix.setdiag(D, np.squeeze(np.array(np.sum(W, axis=0))))
Dh = np.sqrt(D)
np.reciprocal(Dh.data, out=Dh.data)
L = D - W
L_sym = Dh @ L @ Dh
# (abs(L - L.T) > 1e-10).nnz == 0
L_sym = 0.5 * (L_sym.T + L_sym)
return L,L_sym
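# Quick property check (sketch; random data, reuses ANN_hnsw above). Because the
# diagonal D holds the degrees of the symmetric weight matrix W, every row of
# L = D - W sums to zero.
def _laplacian_rowsum_demo():
    rng = np.random.RandomState(0)
    x = rng.rand(100, 4)
    A, dd = ANN_hnsw(x, k=5, euclidian_metric=True)
    L, L_sym = Laplacian_Euclidian(x, A, dd)
    assert np.allclose(np.asarray(L.sum(axis=1)).ravel(), 0.0)
    return L, L_sym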
def Laplacian_angular(X, A,dt=None):
"""
    Computes the Graph Laplacian L = D - W with the cosine angular metric, W_ij = A_ij * (1 - X_i . X_j / (||X_i|| * ||X_j||)), where D is the diagonal degree matrix.
:param X: 2D numpy array with the first dimension being different data points, and the second the features of each point.
:param A: Adjacency matrix built with the same metric.
:param dt: datatype the returned Laplacian should have, if not used, it will default to whatever the datatype of X is.
:return: Graph Laplacian, Normalized symmetric Graph Laplacian
"""
if isinstance(X, torch.Tensor):
X = X.numpy()
if dt is None:
dt = X.dtype
A = A.tocoo()
n, _ = A.shape
I = A.row
J = A.col
V = 1-np.sum((X[I] * X[J]), axis=1)/(np.sqrt(np.sum(X[I]**2, axis=1))*np.sqrt(np.sum(X[J]**2, axis=1)))
V[V<0] = 0 #Numerical precision can sometimes lead to some elements being less than zero.
    assert np.max(V) < 1, "some elements of V are larger than 1. This means that some neighbours are less than orthogonal, hence absolutely terrible neighbours. What are you doing?"
W = coo_matrix((V, (I, J)), shape=(n, n))
D = coo_matrix((n, n), dtype=dt)
coo_matrix.setdiag(D, np.squeeze(np.array(np.sum(W, axis=0))))
Dh = np.sqrt(D)
np.reciprocal(Dh.data, out=Dh.data)
L = D - W
L_sym = Dh @ L @ Dh
L_sym = 0.5 * (L_sym.T + L_sym)
return L, L_sym
| 47.529412
| 397
| 0.675248
|
79502aced36752ecb8bbda7ccd71c3f2666bdf69
| 5,665
|
py
|
Python
|
build_infrastructure/files/get_key.py
|
opentelekomcloud-infra/csm-ansible-roles
|
64ec3f66013bb2cab67c26085f588eadc3f651a4
|
[
"Apache-2.0"
] | null | null | null |
build_infrastructure/files/get_key.py
|
opentelekomcloud-infra/csm-ansible-roles
|
64ec3f66013bb2cab67c26085f588eadc3f651a4
|
[
"Apache-2.0"
] | 16
|
2020-01-23T19:25:10.000Z
|
2020-12-29T09:55:32.000Z
|
build_infrastructure/files/get_key.py
|
opentelekomcloud-infra/csm-ansible-roles
|
64ec3f66013bb2cab67c26085f588eadc3f651a4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import hashlib
import os
from argparse import ArgumentParser
from dataclasses import dataclass
import requests
from boto3.session import Session
from botocore.exceptions import ClientError
from cryptography.hazmat.backends import default_backend as crypto_default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKeyWithSerialization, RSAPublicKey
from openstack.config import OpenStackConfig
S3_ENDPOINT = 'https://obs.eu-de.otc.t-systems.com'
BUCKET = 'obs-csm'
RW_OWNER = 0o600
def parse_params():
parser = ArgumentParser(description='Synchronize used private key with OBS')
parser.add_argument('--key', '-k', required=True)
parser.add_argument('--output', '-o', required=True)
parser.add_argument('--local', action='store_true', default=False)
args = parser.parse_args()
return args
def generate_key_pair(default_private: bytes = None):
"""Generate key pair as tuple of bytes"""
rsa_backend = crypto_default_backend()
if default_private is None:
private_key: RSAPrivateKeyWithSerialization = rsa.generate_private_key(
backend=rsa_backend,
public_exponent=65537,
key_size=2048
)
else:
private_key = serialization.load_pem_private_key(default_private, None, rsa_backend)
public_key: RSAPublicKey = private_key.public_key()
# noinspection PyTypeChecker
private_bytes: bytes = private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()
)
# noinspection PyTypeChecker
public_bytes: bytes = public_key.public_bytes(serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH)
return private_bytes, public_bytes
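# Minimal usage sketch (illustration only; the file name is an arbitrary example):
# generate a key pair, persist the private part, and re-derive the public part
# from it, which is what _generate_pub_for_private below relies on.
def _example_keypair_roundtrip(path='example_rsa'):
    private_bytes, public_bytes = generate_key_pair()
    with open(path, 'wb') as priv_file:
        priv_file.write(private_bytes)
    _, public_again = generate_key_pair(private_bytes)
    assert public_again == public_bytes
    return public_bytes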
def requires_update(file_name, remote_md5):
"""Check if local file is not up to date with remote"""
if not os.path.isfile(file_name):
return True
with open(file_name, 'rb') as trg_file:
md5 = hashlib.md5(trg_file.read()).hexdigest()
return remote_md5 != md5
def _generate_new_pair(private_key_file: str) -> bytes:
private_key, key_pub = generate_key_pair()
with open(private_key_file, 'wb') as file:
file.write(private_key)
with open(f'{private_key_file}.pub', 'wb') as file_pub:
file_pub.write(key_pub)
return private_key
def _generate_pub_for_private(private_key_file: str):
with open(private_key_file, 'rb') as file:
private_key = file.read()
_, key_pub = generate_key_pair(private_key)
with open(f'{private_key_file}.pub', 'wb') as file_pub:
file_pub.write(key_pub)
@dataclass
class Credential:
"""Container for credential"""
access: str
secret: str
security_token: str
def get_key_from_s3(key_file, key_name, credential: Credential) -> str:
"""Download existing key from s3 or create a new one and upload"""
session = Session(aws_access_key_id=credential.access,
aws_secret_access_key=credential.secret,
aws_session_token=credential.security_token)
obs = session.resource('s3', endpoint_url=S3_ENDPOINT)
bucket = obs.Bucket(BUCKET)
try:
file_md5 = bucket.Object(key_name).e_tag[1:-1]
except ClientError as cl_e:
if cl_e.response['Error']['Code'] == '404':
print('The object does not exist in s3. Generating new one...')
private_key = _generate_new_pair(key_file)
obj = obs.Object(BUCKET, key_name)
obj.put(Body=private_key)
return key_file
raise cl_e
if requires_update(key_file, file_md5):
bucket.download_file(key_name, key_file)
_generate_pub_for_private(key_file)
print('Private key downloaded')
else:
_generate_pub_for_private(key_file)
print('Private key is up to date')
return key_file
def _session_token_request():
return {
'auth': {
'identity': {
'methods': [
'token'
],
'token': {
'duration-seconds': '900',
}
}
}
}
def _get_session_token(auth_url, os_token) -> Credential:
v30_url = auth_url.replace('/v3', '/v3.0')
token_url = f'{v30_url}/OS-CREDENTIAL/securitytokens'
auth_headers = {'X-Auth-Token': os_token}
response = requests.post(token_url, headers=auth_headers, json=_session_token_request())
if response.status_code != 201:
raise RuntimeError('Failed to get temporary AK/SK:', response.text)
data = response.json()['credential']
return Credential(data['access'], data['secret'], data['securitytoken'])
def acquire_temporary_ak_sk() -> Credential:
"""Get temporary AK/SK using password auth"""
os_config = OpenStackConfig()
cloud = os_config.get_one()
iam_session = cloud.get_session()
auth_url = iam_session.get_endpoint(service_type='identity')
os_token = iam_session.get_token()
return _get_session_token(auth_url, os_token)
def main():
"""Run the script"""
args = parse_params()
key_file = args.output
if args.local:
_generate_new_pair(key_file)
print('Generated local key pair:', key_file)
else:
credential = acquire_temporary_ak_sk()
key_file = get_key_from_s3(key_file, args.key, credential)
os.chmod(key_file, RW_OWNER)
if __name__ == '__main__':
main()
| 32.371429
| 102
| 0.678906
|
79502b0f05c5277769186e77b2ddc9652987d044
| 267
|
py
|
Python
|
listings/urls.py
|
snape-here/Real-Estate-Website
|
ac6b32cc641ee4c096b74792916baea1d1e83bdc
|
[
"MIT"
] | 1
|
2021-05-27T20:25:56.000Z
|
2021-05-27T20:25:56.000Z
|
listings/urls.py
|
snape-here/Real-Estate-Website
|
ac6b32cc641ee4c096b74792916baea1d1e83bdc
|
[
"MIT"
] | null | null | null |
listings/urls.py
|
snape-here/Real-Estate-Website
|
ac6b32cc641ee4c096b74792916baea1d1e83bdc
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.urls.resolvers import URLPattern
from . import views
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search'),
]
| 26.7
| 60
| 0.696629
|
79502c65cfec07129aba061182a628864cfcb16a
| 2,600
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/acs/_consts.py
|
jaredwelsh/azure-cli
|
ede8fe0a85543b7e52747e61d99955e916b95a5a
|
[
"MIT"
] | 1
|
2020-12-14T18:11:39.000Z
|
2020-12-14T18:11:39.000Z
|
src/azure-cli/azure/cli/command_modules/acs/_consts.py
|
jaredwelsh/azure-cli
|
ede8fe0a85543b7e52747e61d99955e916b95a5a
|
[
"MIT"
] | 1
|
2020-11-06T02:53:03.000Z
|
2020-11-06T02:53:03.000Z
|
src/azure-cli/azure/cli/command_modules/acs/_consts.py
|
jaredwelsh/azure-cli
|
ede8fe0a85543b7e52747e61d99955e916b95a5a
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
CONST_OUTBOUND_TYPE_LOAD_BALANCER = "loadBalancer"
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING = "userDefinedRouting"
CONST_SCALE_SET_PRIORITY_REGULAR = "Regular"
CONST_SCALE_SET_PRIORITY_SPOT = "Spot"
CONST_SPOT_EVICTION_POLICY_DELETE = "Delete"
CONST_SPOT_EVICTION_POLICY_DEALLOCATE = "Deallocate"
CONST_OS_DISK_TYPE_MANAGED = "Managed"
CONST_OS_DISK_TYPE_EPHEMERAL = "Ephemeral"
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME = "httpApplicationRouting"
CONST_MONITORING_ADDON_NAME = "omsagent"
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = "logAnalyticsWorkspaceResourceID"
CONST_VIRTUAL_NODE_ADDON_NAME = "aciConnector"
CONST_VIRTUAL_NODE_SUBNET_NAME = "SubnetName"
CONST_KUBE_DASHBOARD_ADDON_NAME = "kubeDashboard"
CONST_AZURE_POLICY_ADDON_NAME = "azurepolicy"
# IngressApplicationGateway configuration keys
CONST_INGRESS_APPGW_ADDON_NAME = "ingressApplicationGateway"
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME = "applicationGatewayName"
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = "applicationGatewayId"
CONST_INGRESS_APPGW_SUBNET_ID = "subnetId"
CONST_INGRESS_APPGW_SUBNET_CIDR = "subnetCIDR"
CONST_INGRESS_APPGW_WATCH_NAMESPACE = "watchNamespace"
# confcom addon keys
CONST_CONFCOM_ADDON_NAME = "ACCSGXDevicePlugin"
CONST_ACC_SGX_QUOTE_HELPER_ENABLED = "ACCSGXQuoteHelperEnabled"
# private dns zone mode
CONST_PRIVATE_DNS_ZONE_SYSTEM = "system"
CONST_PRIVATE_DNS_ZONE_NONE = "none"
# Open Service Mesh addon keys
CONST_OPEN_SERVICE_MESH_ADDON_NAME = "openServiceMesh"
ADDONS = {
'http_application_routing': CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
'monitoring': CONST_MONITORING_ADDON_NAME,
'virtual-node': CONST_VIRTUAL_NODE_ADDON_NAME,
'kube-dashboard': CONST_KUBE_DASHBOARD_ADDON_NAME,
'azure-policy': CONST_AZURE_POLICY_ADDON_NAME,
'ingress-appgw': CONST_INGRESS_APPGW_ADDON_NAME,
"confcom": CONST_CONFCOM_ADDON_NAME,
'open-service-mesh': CONST_OPEN_SERVICE_MESH_ADDON_NAME,
}
CONST_CANIPULL_IMAGE = "mcr.microsoft.com/aks/canipull:0.0.3-alpha"
CONST_MANAGED_IDENTITY_OPERATOR_ROLE = 'Managed Identity Operator'
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID = 'f1a07417-d97a-45cb-824c-7a7467783830'
# decorator mode
class DecoratorMode(Enum):
CREATE = 1
UPDATE = 2
| 38.235294
| 94
| 0.780769
|
79502cb4202ba569a2eba82d0ca42f82cb61382c
| 170
|
py
|
Python
|
zenqueue/queue/sync.py
|
zacharyvoase/zenqueue
|
ffd2d87fa2423339423abe606312b3046c4efefe
|
[
"MIT"
] | 6
|
2015-11-05T13:36:19.000Z
|
2019-12-08T14:08:38.000Z
|
zenqueue/queue/sync.py
|
zacharyvoase/zenqueue
|
ffd2d87fa2423339423abe606312b3046c4efefe
|
[
"MIT"
] | null | null | null |
zenqueue/queue/sync.py
|
zacharyvoase/zenqueue
|
ffd2d87fa2423339423abe606312b3046c4efefe
|
[
"MIT"
] | 2
|
2015-09-18T14:59:22.000Z
|
2019-03-26T08:59:08.000Z
|
# -*- coding: utf-8 -*-
from zenqueue.queue.common import AbstractQueue
from zenqueue.utils.sync import Semaphore
Queue = AbstractQueue.with_semaphore_class(Semaphore)
| 24.285714
| 53
| 0.794118
|
79502ccc84c2b30dab7739d27002046b2f1c74dc
| 2,631
|
py
|
Python
|
retargeting/test.py
|
lcy2080/deep-motion-editing
|
7872a076dbae8b3290b0dc94c8eac04159aef65e
|
[
"BSD-2-Clause"
] | null | null | null |
retargeting/test.py
|
lcy2080/deep-motion-editing
|
7872a076dbae8b3290b0dc94c8eac04159aef65e
|
[
"BSD-2-Clause"
] | null | null | null |
retargeting/test.py
|
lcy2080/deep-motion-editing
|
7872a076dbae8b3290b0dc94c8eac04159aef65e
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from os.path import join as pjoin
from get_error import full_batch
import numpy as np
from option_parser import try_mkdir
from eval import eval
import argparse
import platform
platform_name = platform.system()
def batch_copy(source_path, suffix, dest_path, dest_suffix=None):
try_mkdir(dest_path)
files = [f for f in os.listdir(source_path) if f.endswith('_{}.bvh'.format(suffix))]
length = len('_{}.bvh'.format(suffix))
for f in files:
if dest_suffix is not None:
cmd = '{} \"{}\" \"{}\"'.format("copy" if platform_name == "Windows" else "cp", os.path.join(source_path, f), os.path.join(dest_path, f[:-length] + '_{}.bvh'.format(dest_suffix)))
else:
cmd = '{} \"{}\" \"{}\"'.format("copy" if platform_name == "Windows" else "cp",os.path.join(source_path, f), os.path.join(dest_path, f[:-length] + '.bvh'))
os.system(cmd)
if __name__ == '__main__':
test_characters = ['Mousey_m', 'Goblin_m', 'Mremireh_m', 'Vampire_m']
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='./pretrained/')
args = parser.parse_args()
prefix = args.save_dir
cross_dest_path = pjoin(prefix, 'results/cross_structure/')
intra_dest_path = pjoin(prefix, 'results/intra_structure/')
source_path = pjoin(prefix, 'results/bvh/')
cross_error = []
intra_error = []
for i in range(4):
print('Batch [{}/4]'.format(i + 1))
eval(i, prefix)
print('Collecting test error...')
if i == 0:
cross_error += full_batch(0, prefix)
for char in test_characters:
batch_copy(os.path.join(source_path, char), 0, os.path.join(cross_dest_path, char))
batch_copy(os.path.join(source_path, char), 'gt', os.path.join(cross_dest_path, char), 'gt')
intra_dest = os.path.join(intra_dest_path, 'from_{}'.format(test_characters[i]))
        for char in test_characters:
batch_copy(os.path.join(source_path, char), 1, os.path.join(intra_dest, char))
batch_copy(os.path.join(source_path, char), 'gt', os.path.join(intra_dest, char), 'gt')
intra_error += full_batch(1, prefix)
cross_error = np.array(cross_error)
intra_error = np.array(intra_error)
cross_error_mean = cross_error.mean()
intra_error_mean = intra_error.mean()
os.system('rm -r %s' % pjoin(prefix, 'results/bvh'))
print('Intra-retargeting error:', intra_error_mean)
print('Cross-retargeting error:', cross_error_mean)
print('Evaluation finished!')
| 37.056338
| 191
| 0.646142
|
79502d0be18dad51aa55b9a58457eba20ecb7cd2
| 2,577
|
py
|
Python
|
Timer.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 25
|
2016-10-07T05:08:15.000Z
|
2022-03-22T01:36:51.000Z
|
Timer.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-14T22:27:23.000Z
|
2021-01-14T22:27:23.000Z
|
Timer.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 8
|
2016-08-12T07:26:29.000Z
|
2021-07-05T01:22:42.000Z
|
import atexit
import os
import pdb
import time
class Timer(object):
def __init__(self):
# time.clock() returns:
# unix ==> processor time in seconds as float (cpu time)
# windows ==> wall-clock seconds since first call to this function
# NOTE: time.clock() is deprecated in python 3.3
self._program_start_clock = time.clock() # processor time in seconds
# time.time() returns:
        # unix & windows ==> time in seconds since epoch as float
self._program_start_time = time.time() # time in seconds since the epoch (on Unix)
self._program = (self._program_start_clock, self._program_start_time)
self._lap = (self._program_start_clock, self._program_start_time)
atexit.register(self.endlaps)
# initial API
def elapsed_cpu_seconds(self):
return time.clock() - self._program_start_clock
def elapsed_wallclock_seconds(self):
return time.time() - self._program_start_time
# second API (keep first for backwards compatibility)
def clock_time(self):
return (time.clock(), time.time())
def lap(self, s, verbose=True):
'return (cpu seconds, wall clock seconds) in last lap; maybe print time of current lap'
# NOTE: Cannot use the python standard library to find the elapsed CPU time on Windows
# instead, Windows returns the wall clock time
# inspired by Paul McGuire's timing.py
# ref: http://stackoverflow.com/questions/1557571/how-to-get-time-of-a-python-program-execution
def toStr(t):
'convert seconds to hh:mm:ss.sss'
# this code from Paul McGuire!
return '%d:%02d:%02d.%03d' % reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],
[(t * 1000,), 1000, 60, 60])
def diff(start, now):
return (
toStr(now[0] - start[0]),
toStr(now[1] - start[1])
)
clock_time = self.clock_time()
cumulative_seconds = diff(self._program, clock_time)
lap_seconds = diff(self._lap, clock_time)
self._lap = clock_time # reset lap time
if verbose:
visual_clue = '=' * 50
print visual_clue
print 'lap: %s' % s
print 'cumulative %s cpu %s wallclock' % cumulative_seconds
print 'lap %s cpu %s wallclock' % lap_seconds
print visual_clue
print
return lap_seconds
def endlaps(self):
self.lap('**End Program**')
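# Hedged usage sketch (not part of the original module; assumes Python 2, which the
# print statements and time.clock() calls above already require):
def _example_timing():
    timer = Timer()
    total = sum(i * i for i in range(1000000))  # some CPU-bound busywork to time
    cpu_str, wall_str = timer.lap('squared a million integers')  # prints and returns formatted lap times
    return total, cpu_str, wall_str, timer.elapsed_wallclock_seconds()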
| 37.897059
| 103
| 0.599922
|
79502d7e78beeebddbee5c647692615691ce608d
| 7,711
|
py
|
Python
|
customhelp/themes/justcore.py
|
junaidrehaan/npc-cogs
|
dd1e22a6f333f3c349c9e095a0a6716a75084801
|
[
"MIT"
] | null | null | null |
customhelp/themes/justcore.py
|
junaidrehaan/npc-cogs
|
dd1e22a6f333f3c349c9e095a0a6716a75084801
|
[
"MIT"
] | null | null | null |
customhelp/themes/justcore.py
|
junaidrehaan/npc-cogs
|
dd1e22a6f333f3c349c9e095a0a6716a75084801
|
[
"MIT"
] | null | null | null |
from packaging import version
from redbot import __version__
from redbot.core.utils.chat_formatting import (box, humanize_list,
humanize_number)
from ..abc import ThemesMeta
from ..core.base_help import (CategoryConvert, Context, EmbedField,
HelpSettings, _, cast, commands, get_cooldowns,
get_perms, pagify, shorten_line)
class JustCore(ThemesMeta):
"""This is the raw core help, but with categories"""
async def format_category_help(
self,
ctx: Context,
obj: CategoryConvert,
help_settings: HelpSettings,
get_pages: bool = False,
**kwargs,
):
coms = await self.get_category_help_mapping(
ctx, obj, help_settings=help_settings, **kwargs
)
if not coms:
return
if await ctx.embed_requested():
emb = await self.embed_template(help_settings, ctx)
if description := obj.long_desc:
emb["embed"]["title"] = f"{description[:250]}"
for cog_name, data in coms:
title = f"**__{cog_name}:__**"
cog_text = "\n".join(
shorten_line(f"**{name}** {command.format_shortdoc_for_context(ctx)}")
for name, command in sorted(data.items())
)
for i, page in enumerate(pagify(cog_text, page_length=1000, shorten_by=0)):
title = title if i < 1 else _("{title} (continued)").format(title=title)
field = EmbedField(title, page, False)
emb["fields"].append(field)
pages = await self.make_embeds(ctx, emb, help_settings=help_settings)
if get_pages:
return pages
else:
await self.send_pages(
ctx,
pages,
embed=True,
help_settings=help_settings,
)
else:
await ctx.send(_("You need to enable embeds to use the help menu"))
async def format_cog_help(self, ctx: Context, obj: commands.Cog, help_settings: HelpSettings):
coms = await self.get_cog_help_mapping(ctx, obj, help_settings=help_settings)
if not (coms or help_settings.verify_exists):
return
if await ctx.embed_requested():
emb = await self.embed_template(help_settings, ctx, obj.format_help_for_context(ctx))
if coms:
command_text = "\n".join(
shorten_line(f"**{name}** {command.format_shortdoc_for_context(ctx)}")
for name, command in sorted(coms.items())
)
for i, page in enumerate(pagify(command_text, page_length=500, shorten_by=0)):
if i == 0:
title = _("**__Commands:__**")
else:
title = _("**__Commands:__** (continued)")
field = EmbedField(title, page, False)
emb["fields"].append(field)
pages = await self.make_embeds(ctx, emb, help_settings=help_settings)
await self.send_pages(
ctx,
pages,
embed=True,
help_settings=help_settings,
)
else:
await ctx.send(_("You need to enable embeds to use the help menu"))
async def format_command_help(
self, ctx: Context, obj: commands.Command, help_settings: HelpSettings
):
send = help_settings.verify_exists
if not send:
async for __ in self.help_filter_func(
ctx, (obj,), bypass_hidden=True, help_settings=help_settings
):
send = True
if not send:
return
command = obj
signature = _(
"Syntax: {ctx.clean_prefix}{command.qualified_name} {command.signature}"
).format(ctx=ctx, command=command)
# Backward compatible.
if version.parse(__version__) >= version.parse("3.4.6"):
aliases = command.aliases
if help_settings.show_aliases and aliases:
alias_fmt = _("Aliases") if len(command.aliases) > 1 else _("Alias")
aliases = sorted(aliases, key=len)
a_counter = 0
valid_alias_list = []
for alias in aliases:
if (a_counter := a_counter + len(alias)) < 500:
valid_alias_list.append(alias)
else:
break
a_diff = len(aliases) - len(valid_alias_list)
aliases_list = [
f"{ctx.clean_prefix}{command.parent.qualified_name + ' ' if command.parent else ''}{alias}"
for alias in valid_alias_list
]
if len(valid_alias_list) < 10:
aliases_content = humanize_list(aliases_list)
else:
aliases_formatted_list = ", ".join(aliases_list)
if a_diff > 1:
aliases_content = _("{aliases} and {number} more aliases.").format(
aliases=aliases_formatted_list, number=humanize_number(a_diff)
)
else:
aliases_content = _("{aliases} and one more alias.").format(
aliases=aliases_formatted_list
)
signature += f"\n{alias_fmt}: {aliases_content}"
subcommands = None
if hasattr(command, "all_commands"):
grp = cast(commands.Group, command)
subcommands = await self.get_group_help_mapping(ctx, grp, help_settings=help_settings)
if await ctx.embed_requested():
emb = await self.embed_template(
help_settings, ctx, command.format_help_for_context(ctx)
)
if description := command.description:
emb["embed"]["title"] = f"{description[:250]}"
emb["embed"]["description"] = box(signature)
if final_perms := get_perms(command):
emb["fields"].append(EmbedField("Permissions", final_perms, False))
if cooldowns := get_cooldowns(command):
emb["fields"].append(EmbedField("Cooldowns:", "\n".join(cooldowns), False))
if subcommands:
def shorten_line(a_line: str) -> str:
if len(a_line) < 70: # embed max width needs to be lower
return a_line
return a_line[:67] + "..."
subtext = "\n".join(
shorten_line(f"**{name}** {command.format_shortdoc_for_context(ctx)}")
for name, command in sorted(subcommands.items())
)
for i, page in enumerate(pagify(subtext, page_length=500, shorten_by=0)):
if i == 0:
title = _("**__Subcommands:__**")
else:
title = _("**__Subcommands:__** (continued)")
field = EmbedField(title, page, False)
emb["fields"].append(field)
pages = await self.make_embeds(ctx, emb, help_settings=help_settings)
await self.send_pages(
ctx,
pages,
embed=True,
help_settings=help_settings,
)
else:
await ctx.send(_("You need to enable embeds to use the help menu"))
| 40.161458
| 111
| 0.520425
|
79502f3ed57ba967bbce22ce2b2b0391e6490992
| 535
|
py
|
Python
|
website/example_problem_graders/base.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
website/example_problem_graders/base.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
website/example_problem_graders/base.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
class BaseGrader:
def __init__(self, problem):
self.problem = problem
def grade(self, submission, score):
'''
Assigns a point value to the submission, and updates the
corresponding score and competitor's total score
Returns: None
'''
raise NotImplementedError()
def validate(self, user_input):
'''
Checks whether the user's input is in an acceptable format
Returns: bool
'''
raise NotImplementedError()
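# Hedged illustration (hypothetical subclass, not part of the original module): shows the
# contract described by the docstrings above -- validate() checks the input format and
# grade() assigns points. The attributes submission.text, score.points, score.save() and
# problem.answer are assumptions for this sketch, not part of the real models.
class ExactAnswerGrader(BaseGrader):
    def grade(self, submission, score):
        # Award one point when the submission matches the stored answer exactly.
        score.points = 1 if submission.text.strip() == str(self.problem.answer) else 0
        score.save()
    def validate(self, user_input):
        # Accept any non-empty string.
        return bool(user_input and user_input.strip())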
| 25.47619
| 67
| 0.585047
|
79502f804dd78a88f3370c07e27f6b051b927786
| 2,048
|
py
|
Python
|
dohq_teamcity/models/state_field.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 23
|
2018-10-19T07:28:45.000Z
|
2021-11-12T12:46:09.000Z
|
dohq_teamcity/models/state_field.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 31
|
2018-10-16T05:53:11.000Z
|
2021-09-09T14:44:14.000Z
|
dohq_teamcity/models/state_field.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 12
|
2018-10-28T23:00:17.000Z
|
2021-09-07T12:07:13.000Z
|
# coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
class StateField(TeamCityObject):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'value': 'bool',
'inherited': 'bool'
}
attribute_map = {
'value': 'value',
'inherited': 'inherited'
}
def __init__(self, value=False, inherited=False, teamcity=None): # noqa: E501
"""StateField - a model defined in Swagger""" # noqa: E501
self._value = None
self._inherited = None
self.discriminator = None
if value is not None:
self.value = value
if inherited is not None:
self.inherited = inherited
super(StateField, self).__init__(teamcity=teamcity)
@property
def value(self):
"""Gets the value of this StateField. # noqa: E501
:return: The value of this StateField. # noqa: E501
:rtype: bool
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this StateField.
:param value: The value of this StateField. # noqa: E501
:type: bool
"""
self._value = value
@property
def inherited(self):
"""Gets the inherited of this StateField. # noqa: E501
:return: The inherited of this StateField. # noqa: E501
:rtype: bool
"""
return self._inherited
@inherited.setter
def inherited(self, inherited):
"""Sets the inherited of this StateField.
:param inherited: The inherited of this StateField. # noqa: E501
:type: bool
"""
self._inherited = inherited
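# Hedged usage sketch (not part of the generated model; assumes TeamCityObject tolerates
# the default teamcity=None, as the constructor signature above suggests):
def _example_state_field():
    field = StateField(value=True, inherited=False)
    return field.value, field.inherited  # -> (True, False)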
| 24.674699
| 82
| 0.582031
|
795030089c7969b5c8308f008d6cb8c7b569fc87
| 6,916
|
py
|
Python
|
pyeit/io/et4.py
|
DavidMetzIMT/pyEIT
|
a3c64f7b869e7a00a102fc93feea4999c8bed6d1
|
[
"BSD-3-Clause"
] | null | null | null |
pyeit/io/et4.py
|
DavidMetzIMT/pyEIT
|
a3c64f7b869e7a00a102fc93feea4999c8bed6d1
|
[
"BSD-3-Clause"
] | null | null | null |
pyeit/io/et4.py
|
DavidMetzIMT/pyEIT
|
a3c64f7b869e7a00a102fc93feea4999c8bed6d1
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=no-member, invalid-name
# pylint: disable=too-many-arguments, too-many-instance-attributes
"""
Load .et4 file into mem (experimental).
This file structure may be modified in near future.
"""
# Copyright (c) Benyuan Liu. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from struct import unpack
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class ET4:
""".et4 file loader"""
def __init__(
self, file_name, ex_mtx=None, step=1, compatible=False, output_resistor=False
):
"""
initialize .et4 handler.
.et4 is an experimental file format for XEIT-ng system
try to read data and parse FILE HEADER
        [-- 256 int/float parameters + (256 double RE) + (256 double IM) --]
Parameters
----------
file_name : basestring
ex_mtx : NDArray
num_lines x 2 array
step : int
architecture of voltage meter
compatible : bool
if data output needs to be .et3 compatible
output_resistor : bool
convert voltage to current
Returns
-------
NDArray
return data (complex valued)
Notes
-----
in .et4, a frame consists (16) excitations, and in each excitation
ADC samples are arranged as AD1, AD2, ..., AD16, consecutively. so
(AD1, AD2, ..., AD16) (AD1, AD2, ..., AD16) ... (AD1, AD2, ..., AD16)
"see hardware section in eit.pdf, benyuan liu."
when the excitation electrodes are marked as A, B,
in order to be compatible with .et3, you should
1. automatically zero-out 4 measures where A, A-1, B, B-1
2. rearrange all the measures so that in each excitation,
the first measurement always start from (include) 'A'
3. divide 'diff Voltage' / Current = Resistor (optional)
"""
self.file_name = file_name
self.ex_mtx = ex_mtx
self.step = step
self.compatible = compatible
self.output_resistor = output_resistor
# 1. get .et4 file length
nbytes = et4_tell(self.file_name)
# 2. get nframes (a frame = (256 + 256d + 256d) = 5120 Bytes)
self.info_num = 256
self.data_num = 256 * 2
self.header_size = self.info_num * 4
self.frame_size = self.header_size + self.data_num * 8
self.nframe = int((nbytes) / (self.frame_size))
# 3. load data
self.data = self.load()
def load(self):
"""load RAW data"""
# 1. prepare storage
x = np.zeros((self.nframe, self.data_num), dtype=np.double)
# 3. unpack data and extract parameters
with open(self.file_name, "rb") as fh:
for i in range(self.nframe):
d = fh.read(self.frame_size)
x[i] = np.array(unpack("512d", d[self.header_size :]))
data = x[:, :256] + 1j * x[:, 256:]
# electrode re-arranged the same as .et3 file
if self.compatible:
v_index, c_index = zero_rearrange_index(self.ex_mtx)
dout = data[:, v_index]
if self.output_resistor:
# number of diff_V per stimulation
M = int(len(v_index) / len(c_index))
# current index is the same in each M measurements
cm_index = np.repeat(c_index, M)
# R = diff_V / I
dout = dout / data[:, cm_index]
else:
dout = data
return dout
def load_info(self):
"""load info headers from xEIT"""
# 1. prepare storage
info = np.zeros((self.nframe, 256))
# 3. unpack data and extract parameters
with open(self.file_name, "rb") as fh:
for i in range(self.nframe):
d = fh.read(self.frame_size)
info[i, :] = np.array(unpack("33if222i", d[: self.header_size]))
return info
def to_df(self, resample=None, rel_date=None, fps=20):
"""convert raw data to pandas.DataFrame"""
if rel_date is None:
rel_date = "2019/01/01"
ta = np.arange(self.nframe) * 1.0 / fps
ts = pd.to_datetime(rel_date) + pd.to_timedelta(ta, "s")
df = pd.DataFrame(self.data, index=ts)
# resample
if resample is not None:
df = df.resample(resample).mean()
return df
def to_csv(self):
"""save file to csv"""
raise NotImplementedError()
def et4_tell(fstr):
"""check the filetype of et4"""
with open(fstr, "rb") as fh:
fh.seek(0, 2) # move the cursor to the end (2) of the file
file_len = fh.tell()
return file_len
def zero_rearrange_index(ex_mtx):
"""
(default mode: opposition stimulation)
0. excitation electrodes are denoted by 'A' and 'B'
1. for each excitation, REARRANGE all the data start from 'A'
2. zero all the channels of A, A-1, B, B-1
returns : re-ordered non-zero index, current index
"""
if ex_mtx is None:
num_lines, num_el, el_dist = 16, 16, 8
ab_scan = False
else:
num_lines, num_el = ex_mtx.shape
ab_scan = True
v_index, c_index = [], [] # non-zero diff-pairs and current values
for k in range(num_lines):
if ab_scan:
ex_pat = ex_mtx[k, :].ravel()
a = np.where(ex_pat == 1)[0][0]
b = np.where(ex_pat == -1)[0][0]
else:
a = k # positive excitation
b = (a + el_dist) % num_el # negative excitation
ap = (a - 1) % num_el # positive adjacent
bp = (b - 1) % num_el # negative adjacent
# print(A, B, Ap, Bp)
c_index.append(k * num_el + b)
for i in range(num_el):
# re-order data start after A
j = (i + a) % num_el
if j not in (a, b, ap, bp):
v_index.append(k * num_el + j)
return v_index, c_index
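# Hedged illustration (not part of the original module): with the default opposition protocol
# (ex_mtx=None: 16 electrodes, 16 excitation lines, el_dist=8), four channels are zeroed per
# excitation, leaving 16 * 12 = 192 voltage indices and 16 current indices -- which matches
# the division by 192.0 used in the __main__ example below.
def _example_default_index_sizes():
    v_index, c_index = zero_rearrange_index(None)
    return len(v_index), len(c_index)  # -> (192, 16)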
if __name__ == "__main__":
# .et4 file
et_file = r"C:\xeit\eit_20190929-103342.et4"
# et_file = "../../datasets/s00-02.et4"
# load data
et4 = ET4(et_file, compatible=True, output_resistor=False)
et4_data = et4.data
print(et4_data.shape)
ti = et4_data.sum(axis=1) / 192.0
ti_real = np.real(ti)
ti_imag = np.imag(ti)
ti_abs = np.sqrt(ti_real**2 + ti_imag**2)
print("max = ", np.max(ti_abs))
print("min = ", np.min(ti_abs))
xlim = 1000
if ti_abs.shape[0] < 1000:
xlim = ti_abs.shape[0]
# plot
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(211)
ax.plot(ti_real, "b-")
axt = ax.twinx()
axt.plot(ti_imag, "r-")
ax.set_xlim([0, xlim])
ax.grid(True)
ax2 = fig.add_subplot(212)
ax2.plot(ti_abs, "r-")
ax2.grid(True)
ax2.set_xlim([0, xlim])
plt.show()
# fig.savefig('s00-03-80k.png')
| 31.013453
| 85
| 0.568103
|
79503018ac296e095baa91de3d0464cc7956adb8
| 374
|
py
|
Python
|
base64/test.py
|
miloyip/benchmarks
|
e338363f2d3c35e8ff834c65d38d19f3108863b3
|
[
"MIT"
] | 2
|
2020-01-09T02:44:27.000Z
|
2022-03-12T01:35:44.000Z
|
base64/test.py
|
miloyip/benchmarks
|
e338363f2d3c35e8ff834c65d38d19f3108863b3
|
[
"MIT"
] | null | null | null |
base64/test.py
|
miloyip/benchmarks
|
e338363f2d3c35e8ff834c65d38d19f3108863b3
|
[
"MIT"
] | 3
|
2015-12-18T06:59:16.000Z
|
2020-03-19T06:49:25.000Z
|
import base64, time
STR_SIZE = 10000000
TRIES = 100
str = "a" * STR_SIZE
str2 = ""
t, s = time.time(), 0
for _ in range(0, TRIES):
str2 = base64.b64encode(str)
s += len(str2)
print("encode: {0}, {1}".format(s, time.time() - t))
t, s = time.time(), 0
for _ in range(0, TRIES):
s += len(base64.b64decode(str2))
print("decode: {0}, {1}".format(s, time.time() - t))
| 19.684211
| 52
| 0.596257
|
7950305db4e7bcbdc7503ab0f86c072a3287da1b
| 4,868
|
py
|
Python
|
docs/conf.py
|
lenarother/santa-helpers
|
0498b9922b357c98543929a39d9755085da527b0
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
lenarother/santa-helpers
|
0498b9922b357c98543929a39d9755085da527b0
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
lenarother/santa-helpers
|
0498b9922b357c98543929a39d9755085da527b0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# santa_helpers documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import santa_helpers
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'santa-helpers'
copyright = "2022, Magdalena Rother"
author = "Magdalena Rother"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = santa_helpers.__version__
# The full version, including alpha/beta/rc tags.
release = santa_helpers.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'santa_helpersdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'santa_helpers.tex',
'santa-helpers Documentation',
'Magdalena Rother', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'santa_helpers',
'santa-helpers Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'santa_helpers',
'santa-helpers Documentation',
author,
'santa_helpers',
'One line description of project.',
'Miscellaneous'),
]
| 29.865031
| 77
| 0.688989
|
795031021b8e26cb347a86141cbac69062efcc76
| 1,285
|
py
|
Python
|
realtime_hand_3d/segmentation/utils/metrics.py
|
NeelayS/realtime_hand
|
219c772b9b7df60c390edac7da23f9cdddebca4d
|
[
"MIT"
] | null | null | null |
realtime_hand_3d/segmentation/utils/metrics.py
|
NeelayS/realtime_hand
|
219c772b9b7df60c390edac7da23f9cdddebca4d
|
[
"MIT"
] | null | null | null |
realtime_hand_3d/segmentation/utils/metrics.py
|
NeelayS/realtime_hand
|
219c772b9b7df60c390edac7da23f9cdddebca4d
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def compute_iou(pred, target, is_idx=None):
    """Compute per-class IoU between predictions and an integer label map.
    `pred` holds raw logits of shape (N, C, H, W) unless `is_idx` is truthy, in which case it
    already holds class indices; background class 0 is skipped. Returns a numpy array of IoUs.
    """
    n_classes = len(np.unique(target.cpu().data.numpy()))
if n_classes == 1:
pred_unique_np = np.unique(pred.cpu().data.numpy())
if len(pred_unique_np) == 1 and pred_unique_np[0] == 0:
return np.array([1.0])
else:
return np.array([0.0])
ious = []
if not pred.shape[2] == target.shape[1]:
pred = nn.functional.interpolate(
pred,
size=(target.shape[1], target.shape[2]),
mode="bilinear",
align_corners=True,
)
if not is_idx:
pred = torch.argmax(pred, dim=1)
pred = pred.view(-1)
target = target.view(-1)
for cls in range(1, n_classes):
pred_inds = pred == cls
target_inds = target == cls
intersection = (pred_inds[target_inds]).long().sum().data.cpu().item()
union = (
pred_inds.long().sum().data.cpu().item()
+ target_inds.long().sum().data.cpu().item()
- intersection
)
if union == 0:
ious.append(1.0)
else:
ious.append(float(intersection) / float(union))
return np.array(ious)
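# Hedged usage sketch (not part of the original module): per-class IoU for a 3-class
# segmentation problem with raw logits of shape (N, C, H, W) and an integer label map
# of shape (N, H, W); class 0 is treated as background and skipped by compute_iou.
def _example_compute_iou():
    logits = torch.randn(2, 3, 8, 8)         # N=2 images, C=3 classes, 8x8 predictions
    labels = torch.randint(0, 3, (2, 8, 8))  # ground-truth class indices
    return compute_iou(logits, labels)       # numpy array with the IoU of classes 1 and 2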
| 26.770833
| 78
| 0.550195
|
7950315a2b5f683d289359ab63cce16cec9c6871
| 262
|
py
|
Python
|
python/train_fasttext.py
|
hsiaoko/REENet
|
0763c7c6992a449e8e7ee77e889bb88e98faa389
|
[
"MIT"
] | 1
|
2021-03-02T14:26:52.000Z
|
2021-03-02T14:26:52.000Z
|
python/train_fasttext.py
|
hsiaoko/REENet
|
0763c7c6992a449e8e7ee77e889bb88e98faa389
|
[
"MIT"
] | null | null | null |
python/train_fasttext.py
|
hsiaoko/REENet
|
0763c7c6992a449e8e7ee77e889bb88e98faa389
|
[
"MIT"
] | null | null | null |
import fasttext
if __name__ == '__main__':
corpus_pth = "../corpus/dblp_acm/dblp_acm.title.csv"
model_pth = "../models/embeding/dblp_acm_title.bin"
model = fasttext.train_unsupervised(corpus_pth)
print(model.words)
model.save_model(model_pth)
| 37.428571
| 56
| 0.732824
|
795032bb0ecff99121df7274f4717f56ce7ae0ce
| 546
|
py
|
Python
|
leetcode/binary_tree_preorder_traversal.py
|
alexandru-dinu/competitive-programming
|
4515d221a649b3ab8bc012d01f38b9e4659e2e76
|
[
"MIT"
] | null | null | null |
leetcode/binary_tree_preorder_traversal.py
|
alexandru-dinu/competitive-programming
|
4515d221a649b3ab8bc012d01f38b9e4659e2e76
|
[
"MIT"
] | 6
|
2021-10-12T09:14:30.000Z
|
2021-10-16T19:29:08.000Z
|
leetcode/binary_tree_preorder_traversal.py
|
alexandru-dinu/competitive-programming
|
4515d221a649b3ab8bc012d01f38b9e4659e2e76
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/binary-tree-preorder-traversal
from typing import List, Optional
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
if root is None:
return []
acc = [root.val]
acc.extend(self.preorderTraversal(root.left))
acc.extend(self.preorderTraversal(root.right))
return acc
| 28.736842
| 71
| 0.626374
|
795032c75c590697b381b1d3cab54ceec477988d
| 17,664
|
py
|
Python
|
cfgrib/eccodes.py
|
alexamici/cfgrib
|
6536825ede61bbc61b7b51b827ec0c41efe9d0ee
|
[
"Apache-2.0"
] | null | null | null |
cfgrib/eccodes.py
|
alexamici/cfgrib
|
6536825ede61bbc61b7b51b827ec0c41efe9d0ee
|
[
"Apache-2.0"
] | null | null | null |
cfgrib/eccodes.py
|
alexamici/cfgrib
|
6536825ede61bbc61b7b51b827ec0c41efe9d0ee
|
[
"Apache-2.0"
] | 1
|
2020-05-18T21:59:22.000Z
|
2020-05-18T21:59:22.000Z
|
#
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import bytes, float, int, isinstance
from future.utils import raise_from
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise_from(RuntimeError(self.message), self.exc)
try:
lib = ffi.dlopen('eccodes')
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'libeccodes library not found on the system.')
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class EcCodesError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(EcCodesError, self).__init__(message, code, *args)
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
raise EcCodesError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
raise EcCodesError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_index_new_from_file(path, keys):
# type: (bytes, T.Iterable[bytes]) -> cffi.FFI.CData
keys_enc = b','.join(keys)
return check_last(lib.codes_index_new_from_file)(ffi.NULL, path, keys_enc)
def codes_handle_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB):
try:
retval = check_last(lib.codes_handle_new_from_file)(ffi.NULL, fileobj, product_kind)
if retval == ffi.NULL:
return None
else:
return retval
except EcCodesError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("File object is empty: %r" % fileobj)
raise
def codes_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB):
if product_kind == lib.PRODUCT_GRIB:
return codes_handle_new_from_file(fileobj, product_kind)
raise Exception("Invalid product kind: %r" % product_kind)
codes_index_delete = lib.codes_index_delete
codes_handle_delete = lib.codes_handle_delete
def codes_new_from_index(indexid):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
return check_last(lib.codes_handle_new_from_index)(indexid)
def codes_index_get_size(indexid, key):
# type: (cffi.FFI.CData, bytes) -> int
"""
    Get the number of coded values for a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
codes_index_get_size = check_return(lib.codes_index_get_size)
codes_index_get_size(indexid, key, size)
return size[0]
def codes_index_get_long(indexid, key):
# type: (cffi.FFI.CData, bytes) -> T.List[int]
"""
Get the list of integer values associated to a key.
The index must be created with such a key (possibly together with other
keys).
:param bytes key: the keyword whose list of values has to be retrieved
:rtype: List(int)
"""
size = codes_index_get_size(indexid, key)
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
check_return(lib.codes_index_get_long)(indexid, key, values, size_p)
return list(values)
def codes_index_get_double(indexid, key):
# type: (cffi.FFI.CData, bytes) -> T.List[float]
"""
Get the list of double values associated to a key.
The index must be created with such a key (possibly together with other
keys).
:param bytes key: the keyword whose list of values has to be retrieved
    :rtype: List(float)
"""
size = codes_index_get_size(indexid, key)
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
check_return(lib.codes_index_get_double)(indexid, key, values, size_p)
return list(values)
def codes_index_get_string(indexid, key, length=256):
# type: (cffi.FFI.CData, bytes, int) -> T.List[bytes]
"""
Get the list of string values associated to a key.
The index must be created with such a key (possibly together with other
keys).
:param bytes key: the keyword whose list of values has to be retrieved
    :rtype: List(bytes)
"""
size = codes_index_get_size(indexid, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('const char *[]', values_keepalive)
size_p = ffi.new('size_t *', size)
codes_index_get_string = check_return(lib.codes_index_get_string)
codes_index_get_string(indexid, key, values, size_p)
return [ffi.string(values[i]) for i in range(size_p[0])]
def codes_index_get(indexid, key, ktype=bytes):
# type: (cffi.FFI.CData, bytes, type) -> list
if ktype is int:
result = codes_index_get_long(indexid, key) # type: T.List[T.Any]
elif ktype is float:
result = codes_index_get_double(indexid, key)
elif ktype is bytes:
result = codes_index_get_string(indexid, key)
else:
raise TypeError("ktype not supported %r" % ktype)
return result
def codes_index_get_autotype(indexid, key):
# type: (cffi.FFI.CData, bytes) -> list
try:
return codes_index_get_long(indexid, key)
except EcCodesError:
pass
try:
return codes_index_get_double(indexid, key)
except EcCodesError:
return codes_index_get_string(indexid, key)
def codes_index_select_long(indexid, key, value):
# type: (cffi.FFI.CData, bytes, int) -> None
"""
Properly fix the index on a specific integer value of key. The key must
be one of those the index has been endowed with.
:param bytes key: the key to select
:param int value: the value which has to be selected to use the index
"""
codes_index_select_long = check_return(lib.codes_index_select_long)
codes_index_select_long(indexid, key, value)
def codes_index_select_double(indexid, key, value):
# type: (cffi.FFI.CData, bytes, float) -> None
"""
Properly fix the index on a specific float value of key. The key must
be one of those the index has been endowed with.
:param bytes key: the key to select
:param float value: the value which has to be selected to use the index
"""
codes_index_select_double = check_return(lib.codes_index_select_double)
codes_index_select_double(indexid, key, value)
def codes_index_select_string(indexid, key, value):
# type: (cffi.FFI.CData, bytes, bytes) -> None
"""
Properly fix the index on a specific string value of key. The key must
be one of those the index has been endowed with.
:param bytes key: the key to select
:param bytes value: the value which has to be selected to use the index
"""
codes_index_select_string = check_return(lib.codes_index_select_string)
codes_index_select_string(indexid, key, value)
def codes_index_select(indexid, key, value):
# type: (cffi.FFI.CData, bytes, T.Any) -> None
"""
Select the message subset with key==value.
:param indexid: id of an index created from a file.
The index must have been created with the key in argument.
:param bytes key: key to be selected
:param bytes value: value of the key to select
"""
if isinstance(value, int):
codes_index_select_long(indexid, key, value)
elif isinstance(value, float):
codes_index_select_double(indexid, key, value)
elif isinstance(value, bytes):
codes_index_select_string(indexid, key, value)
else:
raise RuntimeError("Key value not recognised: %r %r (type %r)" % (key, value, type(value)))
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, bytes) -> int
"""
    Get the number of coded values for a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key, size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_length(handle, key):
# type: (cffi.FFI.CData, bytes) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key, size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size=None):
# type: (cffi.FFI.CData, bytes, int) -> T.List[int]
"""
    Get an array of unsigned char values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
if size is None:
size = codes_get_size(handle, key)
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key, values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size=None):
# type: (cffi.FFI.CData, bytes, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
if size is None:
size = codes_get_size(handle, key)
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key, values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size=None):
# type: (cffi.FFI.CData, bytes, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
if size is None:
size = codes_get_size(handle, key)
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key, values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size=None, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if size is None:
size = codes_get_size(handle, key)
if length is None:
length = codes_get_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key, values, size_p)
return [ffi.string(values[i]) for i in range(size_p[0])]
def codes_get_bytes(handle, key):
# type: (cffi.FFI.CData, bytes) -> int
"""
    Get an unsigned char element from a key.
    If more than one key of the same name is present in the message, the last element is returned.
    :param bytes key: the keyword to select the value of
    :rtype: int
"""
values = codes_get_bytes_array(handle, key)
if len(values) == 0:
raise ValueError('No value for key %r' % key)
return values[-1]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, bytes, int) -> bytes
"""
    Get a string element from a key.
    If more than one key of the same name is present in the message, the last element is returned.
    :param bytes key: the keyword to select the value of
    :param int length: buffer length for the string value; looked up from the key when None
    :rtype: bytes
"""
if length is None:
length = codes_get_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
codes_get_string = check_return(lib.codes_get_string)
codes_get_string(handle, key, values, length_p)
return ffi.string(values, length_p[0])
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, bytes) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key, grib_type)
return grib_type[0]
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, bytes, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == CODES_TYPE_LONG:
return codes_get_long_array(handle, key, size=size)
elif key_type == CODES_TYPE_DOUBLE:
return codes_get_double_array(handle, key, size=size)
elif key_type == CODES_TYPE_STRING:
return codes_get_string_array(handle, key, size=size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size=size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, bytes, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == CODES_TYPE_LONG:
values = codes_get_long_array(handle, key, size=1) # type: T.Sequence[T.Any]
return values[0]
elif key_type == CODES_TYPE_DOUBLE:
values = codes_get_double_array(handle, key, size=1)
return values[0]
elif key_type == CODES_TYPE_STRING:
return codes_get_string(handle, key, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes(handle, key)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, bytes) -> cffi.FFI.CData
if namespace is None:
namespace = ffi.NULL
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, namespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
| 30.560554
| 99
| 0.691972
|
795033aff60052c00b4693a988adeb43873a0b22
| 8,050
|
py
|
Python
|
Giveaways.py
|
CodeButt3rs/CodeButt3rsBot
|
db81d332b6595c6f102080e0ddbba3524ae1f600
|
[
"Apache-2.0"
] | 1
|
2021-07-16T17:14:27.000Z
|
2021-07-16T17:14:27.000Z
|
Giveaways.py
|
CodeButt3rs/CodeButt3rsBot
|
db81d332b6595c6f102080e0ddbba3524ae1f600
|
[
"Apache-2.0"
] | null | null | null |
Giveaways.py
|
CodeButt3rs/CodeButt3rsBot
|
db81d332b6595c6f102080e0ddbba3524ae1f600
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import discord
import asyncio
import json
import random
import threading
from random import randrange
from discord_components.component import ButtonStyle
from discord_components import Button
from DatabaseTools import Database
from discord.ext import commands
from discord.utils import get
from tzlocal import get_localzone
from DjangoORM import giveawayDelete, giveawayObject, giveawayWinnerSet
class Giveaways(commands.Cog):
def __init__(self, bot):
self.bot = bot
print(datetime.datetime.now(), "Giveaways module loaded!")
@commands.has_any_role('🎉Giveaways')
@commands.guild_only()
@commands.group(name='giveaway')
async def giveaway(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(description='Choice correct giveaway command!')
await ctx.send(embed=embed)
@commands.has_permissions(administrator=True)
@commands.guild_only()
@giveaway.command(name='channel')
async def giveawayChannel(self, ctx):
fetch = await Database.getGiveawaysChannel(self=Database, guild=ctx.guild)
if get(ctx.guild.channels, id=fetch) is not None:
return print(datetime.datetime.now(), "Can't create Giveaways Channel while another one exists")
overwrites={
ctx.guild.default_role: discord.PermissionOverwrite(send_messages=False, read_messages=True)
}
channel = await ctx.guild.create_text_channel(name='🎉Giveaways', overwrites=overwrites)
await Database.setGiveawaysChannel(self=Database, guild=ctx.guild, id=channel.id)
print(datetime.datetime.now(), ctx.author, 'has created the Giveaways channel')
@commands.has_any_role('🎉Giveaways')
@commands.guild_only()
@giveaway.command(name='create')
async def giveawayCreate(self, ctx, time: int, item):
if time <= 0:
return await ctx.reply(f":pushpin: {ctx.author.mention}, I can't create giveaway with less 10 mins in time!")
fetch = await Database.getGiveawaysChannel(self=Database, guild=ctx.guild)
channel = get(ctx.guild.channels, id=fetch)
if channel is None:
return print(datetime.datetime.now(), "Can't create Giveaway: Channel doesn't exist")
emb = discord.Embed(
title = f'🎉 Giveaway # by {ctx.author.name}!',
color = ctx.author.color,
timestamp = (datetime.datetime.now().astimezone(get_localzone())),
colour=0xFFD966
)
end = datetime.datetime.now().astimezone(get_localzone()) + datetime.timedelta(seconds= time*60)
emb.add_field(name='Prize', value=item, inline=False)
emb.add_field(name='Ends at', value=end.strftime("%b %d %Y %H:%M:%S"), inline=False)
emb.add_field(name = 'Null', value = f'Null', inline=False )
emb.add_field(name = 'Null', value = f'Null', inline=False )
emb.set_footer(text=f'Created by {self.bot.user.name}')
msg = await channel.send('everyone',
embed=emb,
components =
[Button(label= '🎉 Enter giveaway', style=ButtonStyle.green)])
emb.title = f'🎉 Giveaway #{msg.id} by {ctx.author.name}!'
await msg.edit(embed=emb)
# JSON area
data = {
'time': f'{datetime.datetime.now().astimezone(get_localzone()).strftime("%b %d %Y %H:%M:%S")}',
'prize': item,
'hostedBy': ctx.author.id,
'status': True,
'winner': None,
'participants': [],
}
with open(f"Giveaways/{msg.id}.json", "w") as i:
json.dump(data, i)
print(datetime.datetime.now(), 'Giveaway #', msg.id, 'has created by', ctx.author, 'with item', item, 'and time', time)
t = threading.Thread(target=giveawayObject, args=(ctx, msg, end, item))
t.start()
t.join()
while time > 0:
with open(f"Giveaways/{msg.id}.json", "r") as i:
data = json.load(i)
if time <= 15:
emb.title = f'🎉 Giveaway #{msg.id} by {ctx.author.name}! LAST CHANCE TO ENTER!'
emb.colour = 0xFF0000
if time < 60:
emb.set_field_at(index= 2, name = 'Remaining time', value = f'**Ends in {time} mins**', inline=False )
else:
_timeHrs = time // 60
_timeMins = time - (_timeHrs * 60)
emb.set_field_at(index= 2, name = 'Remaining time', value = f'**Ends in {_timeHrs} hrs and {_timeMins} mins**', inline=False )
emb.set_field_at(index = 3, name = 'Number of participants', value = f"`{len(data['participants'])}`", inline=False )
try:
await msg.edit(embed=emb)
except:
print(datetime.datetime.now(), "Can't find giveaway: maybe it was deleted")
                threading.Thread(target=giveawayDelete, args=(msg,)).start()
break
time += -1
await asyncio.sleep(60)
if time <= 0:
emb.clear_fields()
emb.title = f'🎉 Giveaway #{msg.id} by {ctx.author.name}'
with open(f"Giveaways/{msg.id}.json", "r") as i:
data = json.load(i)
data['status'] = False
if (len(data['participants'])) == 0:
emb.add_field(name='Winner', value='No valid entrants, so a winner could not be determined!')
emb.add_field(name='Prize', value=item, inline=False)
data['winner'] = 'No valid entrants'
with open(f"Giveaways/{msg.id}.json", "w") as i:
json.dump(data, i)
print(datetime.datetime.now(), 'Giveaway #', msg.id, 'created by', ctx.author, 'has ended! No valid entrants, so a winner could not be determined.')
                threading.Thread(target=giveawayWinnerSet, args=(msg, "No valid entrants")).start()
return await msg.edit(embed=emb, components = [])
else:
random.seed(randrange(10000))
winnerNumber = randrange(len(data['participants']))
winnerId = data['participants'][winnerNumber]
winner = get(ctx.guild.members, id=winnerId)
emb.add_field(name='Winner', value=f'{winner.mention} won {item}!')
emb.colour = 0xFFD966
emb.add_field(name='Ended at', value=end.strftime("%b %d %Y %H:%M:%S"), inline=False)
await msg.edit(embed=emb, components = [])
data['winner'] = winner.id
print(datetime.datetime.now(), 'Giveaway #', msg.id, 'created by', ctx.author, 'has ended! Random Number -', winnerNumber, ',', winner,'has won', item)
                threading.Thread(target=giveawayWinnerSet, args=(msg, winner.id)).start()
with open(f"Giveaways/{msg.id}.json", "w") as i:
json.dump(data, i)
@commands.Cog.listener()
async def on_button_click(self, interaction):
guild = get(self.bot.guilds, id=int(interaction.raw_data['d']['guild_id']))
if int(interaction.raw_data['d']['message']['id']) == await Database.getWelcomeMsg(Database, guild):
return
try:
with open(f"Giveaways/{int(interaction.raw_data['d']['message']['id'])}.json", "r") as i:
data = json.load(i)
if interaction.user.id in data['participants']:
return await interaction.respond(content = "You're already in giveaway list")
if data['hostedBy'] == interaction.user.id:
return await interaction.respond(content = "You can't participant in your own giveaway")
else:
data['participants'].append(interaction.user.id)
with open(f"Giveaways/{int(interaction.raw_data['d']['message']['id'])}.json", "w") as i:
json.dump(data, i)
return await interaction.respond(content = "You were added to the participants list")
except:
pass
def setup(bot):
bot.add_cog(Giveaways(bot))
| 50.3125
| 164
| 0.598261
|
795033f8c95d83ff27239242f77150aab8eac04b
| 5,049
|
py
|
Python
|
google/ads/google_ads/v4/services/transports/campaign_shared_set_service_grpc_transport.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v4/services/transports/campaign_shared_set_service_grpc_transport.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v4/services/transports/campaign_shared_set_service_grpc_transport.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v4.proto.services import campaign_shared_set_service_pb2_grpc
class CampaignSharedSetServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.ads.googleads.v4.services CampaignSharedSetService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
)
def __init__(self, channel=None, credentials=None,
address='googleads.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.',
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'campaign_shared_set_service_stub': campaign_shared_set_service_pb2_grpc.CampaignSharedSetServiceStub(channel),
}
@classmethod
def create_channel(
cls,
address='googleads.googleapis.com:443',
credentials=None,
**kwargs):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
**kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def get_campaign_shared_set(self):
"""Return the gRPC stub for :meth:`CampaignSharedSetServiceClient.get_campaign_shared_set`.
Returns the requested campaign shared set in full detail.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['campaign_shared_set_service_stub'].GetCampaignSharedSet
@property
def mutate_campaign_shared_sets(self):
"""Return the gRPC stub for :meth:`CampaignSharedSetServiceClient.mutate_campaign_shared_sets`.
Creates or removes campaign shared sets. Operation statuses are returned.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['campaign_shared_set_service_stub'].MutateCampaignSharedSets
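# Hedged usage sketch (hypothetical helper, not part of the generated transport): builds a
# transport with credentials discovered from the environment and returns the stub callable
# for GetCampaignSharedSet; no RPC is issued until the returned callable is invoked.
def _example_build_get_stub(address='googleads.googleapis.com:443'):
    transport = CampaignSharedSetServiceGrpcTransport(address=address)
    return transport.get_campaign_shared_set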
| 37.125
| 123
| 0.646465
|
7950344c30610719ecf652f4246cbb41d7c63ceb
| 1,178
|
py
|
Python
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/custom_audience_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/custom_audience_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/custom_audience_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'CustomAudienceTypeEnum',
},
)
class CustomAudienceTypeEnum(proto.Message):
r"""The types of custom audience.
"""
class CustomAudienceType(proto.Enum):
r"""Enum containing possible custom audience types."""
UNSPECIFIED = 0
UNKNOWN = 1
AUTO = 2
INTEREST = 3
PURCHASE_INTENT = 4
SEARCH = 5
__all__ = tuple(sorted(__protobuf__.manifest))
| 28.047619
| 74
| 0.691002
|
795034f2b709de927c3d7655e99af0ab37f7a2a4
| 1,842
|
py
|
Python
|
libs/python/test/test_line_data_list_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/test/test_line_data_list_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/test/test_line_data_list_dto.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SQE API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import qumranica_api_connector
from qumranica_api_connector.models.line_data_list_dto import LineDataListDTO # noqa: E501
from qumranica_api_connector.rest import ApiException
class TestLineDataListDTO(unittest.TestCase):
"""LineDataListDTO unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test LineDataListDTO
        include_optional is a boolean: when False only required
        params are included; when True both required and
        optional params are included. """
# model = qumranica_api_connector.models.line_data_list_dto.LineDataListDTO() # noqa: E501
if include_optional :
return LineDataListDTO(
lines = [
qumranica_api_connector.models.line_data_dto.LineDataDTO(
line_id = 56,
line_name = '0', )
]
)
else :
return LineDataListDTO(
lines = [
qumranica_api_connector.models.line_data_dto.LineDataDTO(
line_id = 56,
line_name = '0', )
],
)
def testLineDataListDTO(self):
"""Test LineDataListDTO"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 29.709677
| 124
| 0.623236
|
79503602a62e3b0ae4f93d55bf427b2497b5f25e
| 7,417
|
py
|
Python
|
tests/random.py
|
gravityshouldbenaut/MrMustard
|
6ccd1036559dd587d34c9837e1e2424f690ac306
|
[
"Apache-2.0"
] | null | null | null |
tests/random.py
|
gravityshouldbenaut/MrMustard
|
6ccd1036559dd587d34c9837e1e2424f690ac306
|
[
"Apache-2.0"
] | null | null | null |
tests/random.py
|
gravityshouldbenaut/MrMustard
|
6ccd1036559dd587d34c9837e1e2424f690ac306
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from hypothesis import given, strategies as st
from hypothesis.extra.numpy import arrays
from mrmustard.lab import *
angle = st.floats(min_value=0, max_value=2 * np.pi)
positive = st.floats(min_value=0, allow_infinity=False, allow_nan=False)
real = st.floats(allow_infinity=False, allow_nan=False)
r = st.floats(
min_value=0, max_value=0.5, allow_infinity=False, allow_nan=False
) # reasonable squeezing magnitude
real_not_zero = st.one_of(st.floats(max_value=-0.00001), st.floats(min_value=0.00001))
integer = st.integers(min_value=0, max_value=2 ** 32 - 1)
small_float = st.floats(min_value=-0.1, max_value=0.1, allow_infinity=False, allow_nan=False)
medium_float = st.floats(min_value=-1.0, max_value=1.0, allow_infinity=False, allow_nan=False)
large_float = st.floats(min_value=-10.0, max_value=10.0, allow_infinity=False, allow_nan=False)
num_modes = st.integers(min_value=0, max_value=10)
@st.composite
def vector(draw, length):
return draw(
st.lists(st.floats(min_value=-1.0, max_value=1.0), min_size=length, max_size=length)
)
# a strategy to produce a list of integers of length num_modes. the integers are all different and between 0 and num_modes
@st.composite
def modes(draw, num_modes):
return draw(
st.lists(
st.integers(min_value=0, max_value=num_modes), min_size=num_modes, max_size=num_modes
).filter(lambda x: len(set(x)) == len(x))
)
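# Usage sketch (not in the original module): composite strategies are called with
# their extra arguments to build a concrete strategy that Hypothesis draws from
# inside a test; `.example()` is shown only for interactive exploration.
#
#   >>> modes(3).example()   # e.g. [2, 0, 3] -- three distinct ints in [0, 3]
#   >>> vector(2).example()  # e.g. [-0.25, 0.75]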
def array_of_(strategy, minlen=0, maxlen=None):
return arrays(dtype=np.float64, shape=(st.integers(minlen, maxlen),), elements=strategy)
def none_or_(strategy):
return st.one_of(st.just(None), strategy)
angle_bounds = st.tuples(none_or_(angle), none_or_(angle)).filter(
lambda t: t[0] < t[1] if t[0] is not None and t[1] is not None else True
)
positive_bounds = st.tuples(none_or_(positive), none_or_(positive)).filter(
lambda t: t[0] < t[1] if t[0] is not None and t[1] is not None else True
)
real_bounds = st.tuples(none_or_(real), none_or_(real)).filter(
lambda t: t[0] < t[1] if t[0] is not None and t[1] is not None else True
)
@st.composite
def random_Rgate(draw, num_modes=None, trainable=False):
return Rgate(
angle=draw(angle),
angle_bounds=draw(angle_bounds),
angle_trainable=trainable,
)
@st.composite
def random_Sgate(draw, num_modes=None, trainable=False, small=False):
return Sgate(
r=np.abs(draw(small_float)) if small else draw(r),
phi=draw(angle),
r_bounds=draw(positive_bounds),
phi_bounds=draw(angle_bounds),
r_trainable=trainable,
phi_trainable=trainable,
)
@st.composite
def random_Dgate(draw, num_modes=None, trainable=False, small=False):
if small:
x = draw(small_float)
y = draw(small_float)
else:
x = draw(medium_float)
y = draw(medium_float)
return Dgate(
x=x,
y=y,
x_bounds=draw(real_bounds),
y_bounds=draw(real_bounds),
x_trainable=trainable,
y_trainable=trainable,
)
@st.composite
def random_S2gate(draw, trainable=False):
return S2gate(
r=draw(r),
phi=draw(angle),
r_bounds=draw(positive_bounds),
phi_bounds=draw(angle_bounds),
r_trainable=trainable,
phi_trainable=trainable,
)
@st.composite
def random_BSgate(draw, trainable=False):
return BSgate(
theta=draw(angle),
phi=draw(angle),
theta_bounds=draw(angle_bounds),
phi_bounds=draw(angle_bounds),
theta_trainable=trainable,
phi_trainable=trainable,
)
@st.composite
def random_MZgate(draw, trainable=False):
return MZgate(
phi_a=draw(angle),
phi_b=draw(angle),
phi_a_bounds=draw(angle_bounds),
phi_b_bounds=draw(angle_bounds),
phi_a_trainable=trainable,
phi_b_trainable=trainable,
internal=draw(st.booleans()),
)
@st.composite
def random_Interferometer(draw, num_modes, trainable=False):
return Interferometer(num_modes=num_modes, orthogonal_trainable=trainable)
@st.composite
def random_Ggate(draw, num_modes, trainable=False):
displacement = vector(2 * num_modes)
return Ggate(
num_modes=num_modes,
displacement=draw(displacement),
displacement_trainable=trainable,
)
@st.composite
def single_mode_unitary(draw, small=False):
return draw(
st.one_of(random_Rgate(1), random_Sgate(1, small=small), random_Dgate(1, small=small))
)
@st.composite
def two_mode_gate(draw):
return draw(
st.one_of(
random_S2gate(),
random_BSgate(),
random_MZgate(),
random_Ggate(num_modes=2),
random_Interferometer(num_modes=2),
)
)
@st.composite
def n_mode_gate(draw, num_modes=None):
return draw(st.one_of(random_Interferometer(num_modes), random_Ggate(num_modes)))
## states
@st.composite
def squeezed_vacuum(draw, num_modes):
    # use distinct local names: assigning to ``r`` here would shadow the
    # module-level strategy and raise UnboundLocalError when it is referenced
    r_strategy = array_of_(r, num_modes, num_modes)
    phi_strategy = array_of_(angle, num_modes, num_modes)
    return SqueezedVacuum(r=draw(r_strategy), phi=draw(phi_strategy))
@st.composite
def displacedsqueezed(draw, num_modes):
    # distinct local names again; also draw phi and y from their own strategies
    # (the original drew an undefined ``phi`` and reused ``x`` for ``y``)
    r_strategy = array_of_(small_float.filter(lambda s: s > 0.0), num_modes, num_modes)
    phi_strategy = array_of_(angle, num_modes, num_modes)
    x = array_of_(medium_float, num_modes, num_modes)
    y = array_of_(medium_float, num_modes, num_modes)
    return DisplacedSqueezed(r=draw(r_strategy), phi=draw(phi_strategy), x=draw(x), y=draw(y))
@st.composite
def coherent(draw, num_modes):
x = array_of_(medium_float, num_modes, num_modes)
y = array_of_(medium_float, num_modes, num_modes)
return Coherent(x=draw(x), y=draw(y))
@st.composite
def tmsv(draw):
r = array_of_(medium_float.filter(lambda r: r > 0.0), 2, 2)
phi = array_of_(angle, 2, 2)
return TMSV(r=draw(r), phi=draw(phi))
@st.composite
def thermal(draw, num_modes):
n_mean = array_of_(medium_float.filter(lambda r: r > 0.0), num_modes, num_modes)
return Thermal(n_mean=draw(n_mean))
@st.composite
def default_state(draw, num_modes):
return draw(
st.one_of(
squeezed_vacuum(num_modes),
displacedsqueezed(num_modes),
coherent(num_modes),
            tmsv(),  # tmsv() is always two-mode and takes no size argument
thermal(num_modes),
)
)
@st.composite
def default_pure_state(draw, num_modes):
return draw(
st.one_of(
squeezed_vacuum(num_modes),
displacedsqueezed(num_modes),
coherent(num_modes),
            tmsv(),  # tmsv() is always two-mode and takes no size argument
)
)
@st.composite
def pure_state(draw, num_modes=1, small=False):
S = draw(random_Sgate(num_modes, small=small))
I = draw(random_Interferometer(num_modes))
D = draw(random_Dgate(num_modes, small=small))
return Vacuum(num_modes) >> S >> I >> D
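# Minimal sketch of how these strategies are meant to be consumed (assumes only
# hypothesis plus the definitions above; the asserted property is illustrative,
# not one of MrMustard's actual tests, and `purity` is assumed to exist on states):
#
#   @given(state=pure_state(num_modes=1, small=True))
#   def test_pure_state_has_unit_purity(state):
#       assert np.isclose(state.purity, 1.0, atol=1e-6)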
| 29.086275
| 122
| 0.684913
|
7950380007661c1052ac20305267fd048615dc08
| 5,610
|
py
|
Python
|
src/python/fsqio/pants/node/tasks/webpack_resolve.py
|
Eric-Arellano/fsqio
|
9f809badb38679a00adef5b7cab3ad47e33391d0
|
[
"Apache-2.0"
] | null | null | null |
src/python/fsqio/pants/node/tasks/webpack_resolve.py
|
Eric-Arellano/fsqio
|
9f809badb38679a00adef5b7cab3ad47e33391d0
|
[
"Apache-2.0"
] | null | null | null |
src/python/fsqio/pants/node/tasks/webpack_resolve.py
|
Eric-Arellano/fsqio
|
9f809badb38679a00adef5b7cab3ad47e33391d0
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2016 Foursquare Labs Inc. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import logging
import os
from pants.base.build_environment import get_buildroot
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.address import Address
from pants.build_graph.target import Target
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_resolve import NodeResolve
from pants.util.memo import memoized_property
from fsqio.pants.node.subsystems.resolvers.webpack_resolver import WebPackResolver
from fsqio.pants.node.subsystems.webpack_distribution import WebPackDistribution
from fsqio.pants.node.targets.webpack_module import WebPackModule
logger = logging.getLogger(__name__)
class WebPackResolveFingerprintStrategy(DefaultFingerprintStrategy):
def compute_fingerprint(self, target):
# TODO(mateo): Needs to mixin the node distribution from upstream node tests.
super_fingerprint = super(WebPackResolveFingerprintStrategy, self).compute_fingerprint(target)
if not isinstance(target, WebPackModule):
return super_fingerprint
hasher = hashlib.sha1()
hasher.update(super_fingerprint)
hasher.update(target.npm_json)
with open(os.path.join(get_buildroot(), target.npm_json), 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
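# Standalone sketch (not used by the task) of the invalidation idea above: the
# digest folds in both the manifest path and its bytes, so editing package.json
# changes the fingerprint and forces a re-resolve. `manifest_path` is a
# hypothetical argument used only for illustration.
def _example_content_fingerprint(base_fingerprint, manifest_path):
  hasher = hashlib.sha1()
  hasher.update(base_fingerprint.encode('utf-8'))
  hasher.update(manifest_path.encode('utf-8'))
  with open(os.path.join(get_buildroot(), manifest_path), 'rb') as f:
    hasher.update(f.read())
  return hasher.hexdigest()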
class ResolvedWebPackDistribution(Target):
def __init__(self, distribution_fingerprint=None, *args, **kwargs):
"""Synthetic target that represents a resolved webpack distribution."""
# Creating the synthetic target lets us avoid any special casing in regards to build order or cache invalidation.
payload = Payload()
payload.add_fields({
'distribution_fingerprint': PrimitiveField(distribution_fingerprint),
})
super(ResolvedWebPackDistribution, self).__init__(payload=payload, *args, **kwargs)
class WebPackResolve(NodeResolve):
@classmethod
def implementation_version(cls):
return super(WebPackResolve, cls).implementation_version() + [('WebPackResolve', 7.1)]
@classmethod
def subsystem_dependencies(cls):
return super(WebPackResolve, cls).subsystem_dependencies() + (WebPackResolver, WebPackDistribution,)
@memoized_property
def webpack_subsystem(self):
return WebPackDistribution.global_instance()
@classmethod
def prepare(cls, options, round_manager):
# This purposefully clobbers the super class prepare(), because it registers the products of every Resolver
# subsystem, and that causes a cycle with Webpack tasks that want to add to the compile_classpath.
# pylint: disable=no-member
WebPackResolver.prepare(options, round_manager)
@classmethod
def product_types(cls):
return ['webpack_distribution', NodePaths]
def cache_target_dirs(self):
return True
# NOTE(mateo): Override from NodeTask to allow us to pass through custom npm args.
def install_module(
self, target=None, package_manager=None,
install_optional=False, production_only=False, force=False,
node_paths=None, workunit_name=None, workunit_labels=None):
"""Installs node module using requested package_manager."""
package_manager = package_manager or self.node_distribution.get_package_manager(package_manager=package_manager)
module_args = package_manager._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force,
frozen_lockfile=None,
)
npm_options = self.webpack_subsystem.get_distribution_args()
args = list(npm_options + module_args)
command = package_manager.run_command(args=args, node_paths=node_paths)
return self._execute_command(
command, workunit_name=workunit_name, workunit_labels=workunit_labels)
def execute(self):
targets = self.context.targets(predicate=self._can_resolve_target)
if not targets:
return
node_paths = self.context.products.get_data(NodePaths, init_func=NodePaths)
invalidation_context = self.invalidated(
targets,
fingerprint_strategy=WebPackResolveFingerprintStrategy(),
topological_order=True,
invalidate_dependents=True,
)
with invalidation_context as invalidation_check:
webpack_distribution_target = self.create_synthetic_target(self.fingerprint)
build_graph = self.context.build_graph
for vt in invalidation_check.all_vts:
if not vt.valid:
resolver_for_target_type = self._resolver_for_target(vt.target).global_instance()
resolver_for_target_type.resolve_target(self, vt.target, vt.results_dir, node_paths)
node_paths.resolved(vt.target, vt.results_dir)
build_graph.inject_dependency(
dependent=vt.target.address,
dependency=webpack_distribution_target.address,
)
def create_synthetic_target(self, global_fingerprint):
"""Return a synthetic target that represents the resolved webpack distribution."""
spec_path = os.path.join(os.path.relpath(self.workdir, get_buildroot()))
name = "webpack-distribution-{}".format(global_fingerprint)
address = Address(spec_path=spec_path, target_name=name)
logger.debug("Adding synthetic ResolvedWebPackDistribution target: {}".format(name))
new_target = self.context.add_new_target(
address,
ResolvedWebPackDistribution,
distribution_fingerprint=global_fingerprint
)
return new_target
| 40.071429
| 117
| 0.77451
|
795038a9d2cd47bd1f29f08bff644074623b8408
| 10,608
|
py
|
Python
|
mmtbx/refinement/real_space/tst_fit_residues_4.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
mmtbx/refinement/real_space/tst_fit_residues_4.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/refinement/real_space/tst_fit_residues_4.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
from __future__ import absolute_import, division, print_function
import time
import mmtbx.refinement.real_space.fit_residues
import mmtbx.refinement.real_space
pdb_answer = """\
CRYST1 23.341 28.568 19.164 90.00 90.00 90.00 P 1
ATOM 1 N PHE A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA PHE A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C PHE A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O PHE A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB PHE A 58 10.754 19.416 10.061 1.00 8.60 C
ATOM 6 CG PHE A 58 11.066 20.536 9.101 1.00 10.13 C
ATOM 7 CD1 PHE A 58 11.673 21.701 9.544 1.00 8.55 C
ATOM 9 CE1 PHE A 58 11.950 22.730 8.669 1.00 9.55 C
ATOM 10 CE2 PHE A 58 11.009 21.455 6.871 1.00 10.11 C
ATOM 11 CZ PHE A 58 11.621 22.610 7.327 1.00 10.71 C
ATOM 12 N HIS A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 13 CA HIS A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 14 C HIS A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 15 O HIS A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 16 CB HIS A 59 7.250 14.977 9.971 1.00 7.59 C
ATOM 17 CG HIS A 59 6.333 15.874 10.738 1.00 9.83 C
ATOM 19 CD2 HIS A 59 5.689 17.009 10.376 1.00 8.78 C
ATOM 20 CE1 HIS A 59 5.211 16.616 12.481 1.00 9.20 C
ATOM 21 NE2 HIS A 59 5.000 17.452 11.479 1.00 9.98 N
ATOM 22 N TRP A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 23 CA TRP A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 24 C TRP A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 25 O TRP A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 26 CB TRP A 60 12.197 12.509 10.046 1.00 11.46 C
ATOM 27 CG TRP A 60 12.933 13.782 10.392 1.00 14.68 C
ATOM 28 CD1 TRP A 60 12.677 14.629 11.446 1.00 12.97 C
ATOM 29 CD2 TRP A 60 13.987 14.394 9.638 1.00 14.91 C
ATOM 30 NE1 TRP A 60 13.524 15.715 11.398 1.00 9.65 N
ATOM 32 CE3 TRP A 60 14.673 14.043 8.472 1.00 8.58 C
ATOM 33 CZ2 TRP A 60 15.350 16.433 9.839 1.00 12.03 C
ATOM 34 CZ3 TRP A 60 15.670 14.879 8.017 1.00 14.50 C
ATOM 35 CH2 TRP A 60 16.002 16.057 8.697 1.00 11.88 C
ATOM 36 N ARG A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 37 CA ARG A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 38 C ARG A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 39 O ARG A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 40 CB ARG A 61 9.318 9.570 5.914 1.00 20.00 C
ATOM 41 CG ARG A 61 9.425 8.639 4.717 1.00 20.00 C
ATOM 42 CD ARG A 61 8.264 8.745 3.741 1.00 20.00 C
ATOM 43 NE ARG A 61 7.574 10.028 3.848 1.00 20.00 N
ATOM 44 CZ ARG A 61 6.414 10.300 3.269 1.00 20.00 C
ATOM 47 N PRO A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 48 CA PRO A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 49 C PRO A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 50 O PRO A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 51 CB PRO A 62 14.025 6.257 7.281 1.00 6.15 C
ATOM 52 CG PRO A 62 13.701 6.929 8.583 1.00 9.40 C
ATOM 53 CD PRO A 62 12.359 7.621 8.338 1.00 8.83 C
TER 54 PRO A 62
HETATM 55 O HOH S 30 9.529 12.770 12.418 1.00 9.26 O
HETATM 56 O HOH S 63 17.561 6.671 6.582 1.00 8.34 O
HETATM 57 O HOH S 102 16.677 8.520 10.394 1.00 6.42 O
HETATM 58 O HOH S 116 7.555 10.547 10.276 1.00 11.56 O
HETATM 59 O HOH S 167 8.683 23.568 14.164 1.00 17.13 O
HETATM 60 O HOH S 171 15.615 21.403 10.635 1.00 9.12 O
HETATM 61 O HOH S 176 10.243 21.293 13.219 1.00 18.50 O
HETATM 62 O HOH S 192 9.980 5.000 7.291 1.00 24.87 O
HETATM 63 O HOH S 277 18.341 19.685 8.800 1.00 14.61 O
TER 64 HOH S 277
END
"""
pdb_poor = """\
CRYST1 23.341 28.568 19.164 90.00 90.00 90.00 P 1
ATOM 1 N PHE A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA PHE A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C PHE A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O PHE A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB PHE A 58 10.754 19.416 10.061 1.00 8.60 C
ATOM 6 CG PHE A 58 11.545 19.305 11.340 1.00 10.13 C
ATOM 7 CD1 PHE A 58 12.066 18.088 11.748 1.00 8.55 C
ATOM 9 CE1 PHE A 58 12.781 17.983 12.923 1.00 9.55 C
ATOM 10 CE2 PHE A 58 12.466 20.325 13.323 1.00 10.11 C
ATOM 11 CZ PHE A 58 12.986 19.103 13.714 1.00 10.71 C
ATOM 12 N HIS A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 13 CA HIS A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 14 C HIS A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 15 O HIS A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 16 CB HIS A 59 7.250 14.977 9.971 1.00 7.59 C
ATOM 17 CG HIS A 59 6.357 15.744 10.892 1.00 9.83 C
ATOM 19 CD2 HIS A 59 6.246 15.728 12.242 1.00 8.78 C
ATOM 20 CE1 HIS A 59 4.758 17.143 11.468 1.00 9.20 C
ATOM 21 NE2 HIS A 59 5.242 16.606 12.575 1.00 9.98 N
ATOM 22 N TRP A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 23 CA TRP A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 24 C TRP A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 25 O TRP A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 26 CB TRP A 60 12.197 12.509 10.046 1.00 11.46 C
ATOM 27 CG TRP A 60 13.439 11.737 9.668 1.00 14.68 C
ATOM 28 CD1 TRP A 60 13.807 10.487 10.111 1.00 12.97 C
ATOM 29 CD2 TRP A 60 14.513 12.195 8.837 1.00 14.91 C
ATOM 30 NE1 TRP A 60 15.033 10.140 9.587 1.00 9.65 N
ATOM 32 CE3 TRP A 60 14.745 13.369 8.116 1.00 8.58 C
ATOM 33 CZ2 TRP A 60 16.663 11.282 8.064 1.00 12.03 C
ATOM 34 CZ3 TRP A 60 15.913 13.478 7.392 1.00 14.50 C
ATOM 35 CH2 TRP A 60 16.856 12.443 7.368 1.00 11.88 C
ATOM 36 N ARG A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 37 CA ARG A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 38 C ARG A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 39 O ARG A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 40 CB ARG A 61 9.318 9.570 5.914 1.00 20.00 C
ATOM 41 CG ARG A 61 8.017 9.993 6.576 1.00 20.00 C
ATOM 42 CD ARG A 61 6.821 10.020 5.637 1.00 20.00 C
ATOM 43 NE ARG A 61 5.613 10.499 6.303 1.00 20.00 N
ATOM 44 CZ ARG A 61 4.463 10.727 5.686 1.00 20.00 C
ATOM 47 N PRO A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 48 CA PRO A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 49 C PRO A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 50 O PRO A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 51 CB PRO A 62 14.025 6.257 7.281 1.00 6.15 C
ATOM 52 CG PRO A 62 12.802 5.932 8.087 1.00 9.40 C
ATOM 53 CD PRO A 62 12.029 7.250 8.167 1.00 8.83 C
TER 54 PRO A 62
HETATM 55 O HOH S 30 9.529 12.770 12.418 1.00 9.26 O
HETATM 56 O HOH S 63 17.561 6.671 6.582 1.00 8.34 O
HETATM 57 O HOH S 102 16.677 8.520 10.394 1.00 6.42 O
HETATM 58 O HOH S 116 7.555 10.547 10.276 1.00 11.56 O
HETATM 59 O HOH S 167 8.683 23.568 14.164 1.00 17.13 O
HETATM 60 O HOH S 171 15.615 21.403 10.635 1.00 9.12 O
HETATM 61 O HOH S 176 10.243 21.293 13.219 1.00 18.50 O
HETATM 62 O HOH S 192 9.980 5.000 7.291 1.00 24.87 O
HETATM 63 O HOH S 277 18.341 19.685 8.800 1.00 14.61 O
TER 64 HOH S 277
END
"""
pdb_for_map = pdb_answer
def exercise(i_pdb=0, d_min=1.5, resolution_factor=0.1):
"""
Partial (incomplete) residues. Just run to make sure it does not crash.
It will not fix incomplete residues.
"""
#
t = mmtbx.refinement.real_space.setup_test(
pdb_answer = pdb_answer,
pdb_poor = pdb_poor,
i_pdb = i_pdb,
d_min = d_min,
resolution_factor = resolution_factor,
pdb_for_map = pdb_for_map)
#
ph = t.ph_poor
for i in [1,]:
result = mmtbx.refinement.real_space.fit_residues.run(
pdb_hierarchy = ph,
vdw_radii = t.vdw,
crystal_symmetry = t.crystal_symmetry,
map_data = t.target_map,
do_all = True,
massage_map = False,
rotamer_manager = t.rotamer_manager,
sin_cos_table = t.sin_cos_table,
mon_lib_srv = t.mon_lib_srv)
ph = result.pdb_hierarchy
result.pdb_hierarchy.write_pdb_file(file_name = "refined_%s.pdb"%str(i_pdb),
crystal_symmetry = t.crystal_symmetry)
if(__name__ == "__main__"):
t0 = time.time()
exercise()
print("Time: %6.4f"%(time.time()-t0))
| 62.4
| 78
| 0.473888
|
795038f6d5e8d3c2b7a8a9577f0e108212286b85
| 495
|
py
|
Python
|
setup.py
|
stacks13/nbmerge
|
ee4e99d51278882d0ab1a9e7e1264fddc9da275e
|
[
"MIT"
] | null | null | null |
setup.py
|
stacks13/nbmerge
|
ee4e99d51278882d0ab1a9e7e1264fddc9da275e
|
[
"MIT"
] | null | null | null |
setup.py
|
stacks13/nbmerge
|
ee4e99d51278882d0ab1a9e7e1264fddc9da275e
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='nbmerge',
version='0.1',
packages=setuptools.find_packages(),
url='https://github.com/stacks13/nbmerge',
license='',
long_description=long_description,
long_description_content_type="text/markdown",
author='Sahil Nirkhe',
author_email='sahilnirkhe@outlook.com',
description='Merges multiple notebooks together',
python_requires='>=3.6',
)
| 26.052632
| 53
| 0.69899
|
79503a91f8a84d86c5f441a096dee3e74dea4505
| 314
|
py
|
Python
|
tests/test_skeleton.py
|
DarrenMun/flaskPyscaffold
|
74926da002acaa2079d4fc4c680bf17fc5cc0e11
|
[
"MIT"
] | null | null | null |
tests/test_skeleton.py
|
DarrenMun/flaskPyscaffold
|
74926da002acaa2079d4fc4c680bf17fc5cc0e11
|
[
"MIT"
] | null | null | null |
tests/test_skeleton.py
|
DarrenMun/flaskPyscaffold
|
74926da002acaa2079d4fc4c680bf17fc5cc0e11
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from my_project.skeleton import fib
__author__ = "creminsg"
__copyright__ = "creminsg"
__license__ = "mit"
def test_fib():
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
| 17.444444
| 39
| 0.640127
|
79503ab684e9df04223791f1cd2b02bf7e4c464e
| 5,415
|
py
|
Python
|
scripts/viz_one_sim.py
|
idc9/repro_lap_reg
|
1d3e846f8f2c3d04b4153d9ac56e0e9bd37198ca
|
[
"MIT"
] | null | null | null |
scripts/viz_one_sim.py
|
idc9/repro_lap_reg
|
1d3e846f8f2c3d04b4153d9ac56e0e9bd37198ca
|
[
"MIT"
] | null | null | null |
scripts/viz_one_sim.py
|
idc9/repro_lap_reg
|
1d3e846f8f2c3d04b4153d9ac56e0e9bd37198ca
|
[
"MIT"
] | null | null | null |
from os.path import join, exists
from joblib import load
import numpy as np
# import matplotlib.pyplot as plt
import argparse
from itertools import product
import sys
from fclsp.reshaping_utils import fill_hollow_sym
from repro_lap_reg.viz_fit import viz_path_diagnostics, heatmap,\
print_estimators
# from repro_lap_reg.results_parsing import get_path_best_vs_truth
from repro_lap_reg.viz_utils import savefig
from repro_lap_reg.utils import join_and_make
from repro_lap_reg.sim_script_utils import get_param_as_adj
from repro_lap_reg.ResultsWriter import ResultsWriter
parser = argparse.\
ArgumentParser(description='Visualize results for one simulation.')
parser.add_argument('--name', default='meow',
help='File name stub.')
parser.add_argument('--kind', default='covar',
help='What kind of model are we looking at.')
parser.add_argument('--print_only', action='store_true', default=False,
help='Show print out results only.')
parser.add_argument('--out_data_dir',
default='out_data/',
help='Directory for output data.')
parser.add_argument('--results_dir',
default='results/',
help='Directory where visualizations should be saved.')
args = parser.parse_args()
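# Example invocation (hypothetical run name shown only to make the expected
# directory layout concrete):
#
#   python scripts/viz_one_sim.py --kind covar --name meow \
#       --out_data_dir out_data/ --results_dir results/
#
# which reads out_data/covar/meow/{results,models} and writes figures and CSV
# summaries under results/covar/meow/.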
dpi = 100
#########
# paths #
#########
results_dir = args.results_dir
out_data_dir = args.out_data_dir
results_fpath = join(out_data_dir, args.kind, args.name, 'results')
model_fpath = join(out_data_dir, args.kind, args.name, 'models')
save_dir = join_and_make(results_dir, args.kind, args.name)
################
# load results #
################
# load results output
out = load(results_fpath)
# maybe load saved modes
if exists(model_fpath):
models = load(model_fpath)
else:
models = None
# results for the models their best tuning path values
# path_best_vs_truth = get_path_best_vs_truth(out)
# get the true parameter we are targeting
if args.kind == 'covar':
true_param_adj = out['true_param']
elif args.kind in ['lin_reg', 'log_reg']:
true_param_adj = fill_hollow_sym(out['true_param'])
elif args.kind == 'means_est':
true_param_adj = out['true_param'] # fill_hollow_sym(out['true_param'])
##################
# Visualizations #
##################
# create log
writer = ResultsWriter(fpath=join(save_dir, 'log.txt'))
writer.write(out['args'], newlines=1)
writer.write('Simulation ran at {} and took {:1.2f} seconds'.
format(out['sim_datetime'], out['sim_runtime']), newlines=3)
# print models
if models is not None:
print_estimators(estimators=models['fit'], print_func=writer.write)
# save fit runtimes data frame
out['fit']['results'].\
query("vs == 'truth'").\
set_index('model')['runtime'].\
sort_values().\
to_csv(join(save_dir, 'fit_runtimes.csv'), float_format='%1.3f')
# Error metrics for selected models and best path models
for metric, vs in product(['L2', 'support_error'],
['truth', 'oracle']):
# cross-validation selection results vs true parameter
out['fit']['results'].\
query("vs == @vs")[['model', metric]].\
set_index('model').\
sort_values(metric).\
astype(float).\
to_csv(join(save_dir, 'vs_{}_fit_{}.csv'. format(vs, metric)),
float_format='%1.4f')
# get results for best parameter in tuning path
# path_best_vs_truth.\
# query("vs == @vs")[['model', metric]].\
# set_index('model').\
# sort_values(metric).\
# astype(float).\
# to_csv(join(save_dir, 'vs_{}_best_path_{}.csv'. format(vs, metric)),
# float_format='%1.4f')
out['path']['results'].\
query("vs == @vs").\
groupby('model')[metric].\
min().\
sort_values().\
astype(float).\
to_csv(join(save_dir, 'vs_{}_best_path_{}.csv'. format(vs, metric)),
float_format='%1.4f')
if args.print_only:
sys.exit()
# plot visual diagonstics for models with tuning path
for model_name in out['path']['param_seq'].keys():
model_dir = join_and_make(save_dir, model_name)
viz_path_diagnostics(out=out, models=models, model_name=model_name,
save_dir=model_dir)
# summarize path runtimes
res = out['path']['results'].query("vs == 'truth'")
path_runtime_summaries = res.\
groupby('model')['runtime'].\
agg(**{'mean': np.mean,
'median': np.median,
'std': np.std,
'min': np.min,
'max': np.max}).\
sort_values("mean")
path_runtime_summaries.to_csv(join(save_dir, 'path_runtime_summary.csv'))
#################################################
# Heatmaps of the true and estimated parameters #
#################################################
heatmap(true_param_adj)
savefig(join(save_dir, 'true.png'), dpi=dpi)
if models is not None:
# estimate from cv-fit
for model_name, model in models['fit'].items():
model_dir = join_and_make(save_dir, model_name)
heatmap(get_param_as_adj(model, kind=args.kind))
savefig(join(model_dir, 'fit.png'), dpi=dpi)
    # estimate from best path
for model_name, model in models['best_path'].items():
model_dir = join_and_make(save_dir, model_name)
heatmap(get_param_as_adj(model, kind=args.kind))
savefig(join(model_dir, 'best_path.png'), dpi=dpi)
| 31.12069
| 78
| 0.638412
|
79503ac35bfc04c187863dda65e6afe9507af37b
| 6,498
|
py
|
Python
|
tools/nuscenes_to_coco.py
|
albertchristian92/RRPN
|
fd54271b74bbd1cd43cc00f46fd41b19336b4993
|
[
"MIT"
] | 83
|
2019-05-03T15:21:27.000Z
|
2022-02-01T19:55:25.000Z
|
tools/nuscenes_to_coco.py
|
albertchristian92/RRPN
|
fd54271b74bbd1cd43cc00f46fd41b19336b4993
|
[
"MIT"
] | 11
|
2019-06-18T07:33:12.000Z
|
2021-09-01T06:49:50.000Z
|
tools/nuscenes_to_coco.py
|
albertchristian92/RRPN
|
fd54271b74bbd1cd43cc00f46fd41b19336b4993
|
[
"MIT"
] | 31
|
2019-05-03T12:39:00.000Z
|
2022-02-01T19:55:28.000Z
|
import _init_path
import os
import sys
import pickle
import numpy as np
import argparse
from tqdm import tqdm, trange
from cocoplus.coco import COCO_PLUS
from pynuscenes.utils.nuscenes_utils import nuscenes_box_to_coco, nuscene_cat_to_coco
from pynuscenes.nuscenes_dataset import NuscenesDataset
from nuscenes.utils.geometry_utils import view_points
def parse_args():
# Parse the input arguments
parser = argparse.ArgumentParser(description='Converts the NuScenes dataset to COCO format')
parser.add_argument('--nusc_root', default='../data/nuscenes',
help='NuScenes dataroot')
parser.add_argument('--split', default='mini_train',
help='Dataset split (mini_train, mini_val, train, val, test)')
parser.add_argument('--out_dir', default='../data/nucoco/',
help='Output directory for the nucoco dataset')
parser.add_argument('--nsweeps_radar', default=1, type=int,
help='Number of Radar sweeps to include')
    parser.add_argument('--use_symlinks', action='store_true', default=False,
                        help='Create symlinks to nuScenes images rather than copying them')
parser.add_argument('--cameras', nargs='+',
default=['CAM_FRONT',
'CAM_BACK',
# 'CAM_FRONT_LEFT',
# 'CAM_FRONT_RIGHT',
# 'CAM_BACK_LEFT',
# 'CAM_BACK_RIGHT',
],
help='List of cameras to use.')
parser.add_argument('-l', '--logging_level', default='INFO',
help='Logging level')
args = parser.parse_args()
return args
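# Example invocation (paths are the argparse defaults above; only the split and
# camera list are varied for illustration):
#
#   python tools/nuscenes_to_coco.py --split mini_val \
#       --nusc_root ../data/nuscenes --out_dir ../data/nucoco/ \
#       --cameras CAM_FRONT CAM_BACK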
#-------------------------------------------------------------------------------
def main():
args = parse_args()
if "mini" in args.split:
nusc_version = "v1.0-mini"
elif "test" in args.split:
nusc_version = "v1.0-test"
else:
nusc_version = "v1.0-trainval"
## Categories: [category, supercategory, category_id]
categories = [['person', 'person' , 1],
                  ['bicycle', 'vehicle', 2],
['car', 'vehicle', 3],
['motorcycle', 'vehicle', 4],
['bus', 'vehicle', 5],
['truck', 'vehicle', 6]
]
## Short split is used for filenames
anns_file = os.path.join(args.out_dir, 'annotations', 'instances_' + args.split + '.json')
nusc_dataset = NuscenesDataset(nusc_path=args.nusc_root,
nusc_version=nusc_version,
split=args.split,
coordinates='vehicle',
nsweeps_radar=args.nsweeps_radar,
sensors_to_return=['camera', 'radar'],
pc_mode='camera',
logging_level=args.logging_level)
coco_dataset = COCO_PLUS(logging_level="INFO")
coco_dataset.create_new_dataset(dataset_dir=args.out_dir, split=args.split)
## add all category in order to have consistency between dataset splits
for (coco_cat, coco_supercat, coco_cat_id) in categories:
coco_dataset.addCategory(coco_cat, coco_supercat, coco_cat_id)
## Get samples from the Nuscenes dataset
num_samples = len(nusc_dataset)
for i in trange(num_samples):
sample = nusc_dataset[i]
img_ids = sample['img_id']
for i, cam_sample in enumerate(sample['camera']):
if cam_sample['camera_name'] not in args.cameras:
continue
img_id = int(img_ids[i])
image = cam_sample['image']
pc = sample['radar'][i]
cam_cs_record = cam_sample['cs_record']
img_height, img_width, _ = image.shape
# Create annotation in coco_dataset format
sample_anns = []
annotations = nusc_dataset.pc_to_sensor(sample['annotations'][i],
cam_cs_record)
for ann in annotations:
coco_cat, coco_cat_id, coco_supercat = nuscene_cat_to_coco(ann.name)
## if not a valid category, go to the next annotation
if coco_cat is None:
coco_dataset.logger.debug('Skipping ann with category: {}'.format(ann.name))
continue
cat_id = coco_dataset.addCategory(coco_cat, coco_supercat, coco_cat_id)
bbox = nuscenes_box_to_coco(ann, np.array(cam_cs_record['camera_intrinsic']),
(img_width, img_height))
coco_ann = coco_dataset.createAnn(bbox, cat_id)
sample_anns.append(coco_ann)
## Map the Radar pointclouds to image
pc_cam = nusc_dataset.pc_to_sensor(pc, cam_cs_record)
pc_depth = pc_cam[2, :]
pc_image = view_points(pc_cam[:3, :],
np.array(cam_cs_record['camera_intrinsic']),
normalize=True)
## Add the depth information to each point
pc_coco = np.vstack((pc_image[:2,:], pc_depth))
pc_coco = np.transpose(pc_coco).tolist()
## Add sample to the COCO dataset
coco_img_path = coco_dataset.addSample(img=image,
anns=sample_anns,
pointcloud=pc_coco,
img_id=img_id,
other=cam_cs_record,
img_format='RGB',
write_img= not args.use_symlinks,
)
if args.use_symlinks:
try:
os.symlink(os.path.abspath(cam_sample['cam_path']), coco_img_path)
except FileExistsError:
pass
## Uncomment to visualize
# coco_dataset.showImgAnn(np.asarray(image), sample_anns, bbox_only=True, BGR=False)
coco_dataset.saveAnnsToDisk()
if __name__ == '__main__':
main()
| 41.922581
| 96
| 0.517698
|
79503c3ce15efdb889ce64033029f2ff8292c255
| 59,826
|
py
|
Python
|
lang/text_analysis.py
|
gtoffoli/commons-cops
|
e4b1f556c550e25bb2e6a9eabe8db963877c08d3
|
[
"MIT"
] | null | null | null |
lang/text_analysis.py
|
gtoffoli/commons-cops
|
e4b1f556c550e25bb2e6a9eabe8db963877c08d3
|
[
"MIT"
] | null | null | null |
lang/text_analysis.py
|
gtoffoli/commons-cops
|
e4b1f556c550e25bb2e6a9eabe8db963877c08d3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from importlib import import_module
import string
import re
import json
import requests
import tempfile
from collections import defaultdict, OrderedDict
from operator import itemgetter
import textract
import readability
from bs4 import BeautifulSoup
# from django.http import HttpResponse, HttpResponseForbidden, HttpResponseNotFound, HttpResponseBadRequest, JsonResponse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponseNotFound, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.contrib.flatpages.models import FlatPage
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from commons.models import Project, OER, SharedOer, LearningPath, PathNode, SharedLearningPath
from commons.models import FolderDocument
from commons.vocabularies import Language
from commons.forms import TextAnalysisInputForm
from commons.documents import Document
from commons.api import ProjectSerializer, OerSerializer, LearningPathSerializer, PathNodeSerializer
from commons.user_spaces import project_contents, user_contents
nlp_url = settings.NLP_URL
# nlp_url = 'http://localhost:8001'
obj_type_label_dict = {
'project': _('commonspaces project'),
'doc': _('document file'),
'oer': _('open educational resource'),
'pathnode': _('node of learning path'),
'lp': _('learning path'),
'resource': _('remote web resource'),
'text': _('manually input text'),
}
# from NLPBuddy
ENTITIES_MAPPING = {
'PERSON': 'person',
'LOC': 'location',
'GPE': 'location',
'ORG': 'organization',
}
# =====from NLPBuddy
POS_MAPPING = {
'NOUN': 'nouns',
'VERB': 'verbs',
'ADJ': 'adjectives',
'ADV': 'adverbs',
}
EMPTY_POS = [
'SPACE', 'PUNCT', 'CCONJ', 'SCONJ', 'DET', 'PRON', 'ADP', 'AUX', 'PART', 'SYM',
]
postag_color = 'cornflowerBlue'
entity_color = 'tomato'
dependency_color = 'purple'
# ===== froom BRAT; see http://brat.nlplab.org/configuration.html and https://brat.nlplab.org/embed.html
collData = {
'entity_types': [
{ 'type': 'ADJ', 'labels': ['adjective', 'adj'], 'bgColor': postag_color, 'borderColor': 'darken' }, # big, old, green, incomprehensible, first
{ 'type': 'ADP', 'labels': ['adposition', 'adp'], 'bgColor': postag_color, 'borderColor': 'darken' }, # in, to, during
{ 'type': 'ADV', 'labels': ['adverb', 'adv'], 'bgColor': postag_color, 'borderColor': 'darken' }, # very, tomorrow, down, where, there
{ 'type': 'AUX', 'labels': ['auxiliary', 'aux'], 'bgColor': postag_color, 'borderColor': 'darken' }, # is, has (done), will (do), should (do)
{ 'type': 'CONJ', 'labels': ['conjunction', 'conj'], 'bgColor': postag_color, 'borderColor': 'darken' }, # and, or, but
{ 'type': 'CCONJ', 'labels': ['coord.conj.', 'cconj'], 'bgColor': postag_color, 'borderColor': 'darken' }, # and, or, but
{ 'type': 'DET', 'labels': ['determiner', 'det'], 'bgColor': postag_color, 'borderColor': 'darken' }, # a, an, the
{ 'type': 'INTJ', 'labels': ['interjection', 'intj'], 'bgColor': postag_color, 'borderColor': 'darken' }, # psst, ouch, bravo, hello
{ 'type': 'NOUN', 'labels': ['noun', 'noun'], 'bgColor': postag_color, 'borderColor': 'darken' }, # girl, cat, tree, air, beauty
{ 'type': 'NUM', 'labels': ['numeral', 'num'], 'bgColor': postag_color, 'borderColor': 'darken' }, # 1, 2017, one, seventy-seven, IV, MMXIV
{ 'type': 'PART', 'labels': ['particle', 'part'], 'bgColor': postag_color, 'borderColor': 'darken' }, # ’s, not,
{ 'type': 'PRON', 'labels': ['pronoun', 'pron'], 'bgColor': postag_color, 'borderColor': 'darken' }, # I, you, he, she, myself, themselves, somebody
{ 'type': 'PROPN', 'labels': ['proper noun', 'propn'], 'bgColor': postag_color, 'borderColor': 'darken' }, # Mary, John, London, NATO, HBO
{ 'type': 'PUNCT', 'labels': ['punctuation', 'punct'], 'bgColor': postag_color, 'borderColor': 'darken' }, # ., (, ), ?
{ 'type': 'SCONJ', 'labels': ['sub.conj.', 'sconj'], 'bgColor': postag_color, 'borderColor': 'darken' }, # if, while, that
{ 'type': 'SYM', 'labels': ['symbol', 'sym'], 'bgColor': postag_color, 'borderColor': 'darken' }, # $, %, §, ©, +, −, ×, ÷, =, :), 😝
{ 'type': 'VERB', 'labels': ['verb', 'verb'], 'bgColor': postag_color, 'borderColor': 'darken' }, # un, runs, running, eat, ate, eating
{ 'type': 'X', 'labels': ['other', 'x'], 'bgColor': postag_color, 'borderColor': 'darken' }, # sfpksdpsxmsa
{ 'type': 'SPACE', 'labels': ['space', 'sp'], 'bgColor': postag_color, 'borderColor': 'darken' }, #
{ 'type': 'PERSON', 'labels': ['Person', 'Per'], 'bgColor': entity_color, 'borderColor': 'darken' }, # People, including fictional.
{ 'type': 'NORP', 'labels': ['NORP', 'NORP'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Nationalities or religious or political groups.
{ 'type': 'FAC', 'labels': ['Facility', 'Fac'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Buildings, airports, highways, bridges, etc.
{ 'type': 'ORG', 'labels': ['Organization', 'Org'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Companies, agencies, institutions, etc.
{ 'type': 'GPE', 'labels': ['Geo-pol.Entity', 'GPE'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Countries, cities, states.
{ 'type': 'LOC', 'labels': ['Non-GPE location', 'Loc'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Non-GPE locations, mountain ranges, bodies of water.
{ 'type': 'PRODUCT', 'labels': ['Product', 'Prod'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Objects, vehicles, foods, etc. (Not services.)
{ 'type': 'EVENT', 'labels': ['Event', 'Evnt'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Named hurricanes, battles, wars, sports events, etc.
{ 'type': 'WORK_OF_ART', 'labels': ['Work-of-Art', 'WoA'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Titles of books, songs, etc.
{ 'type': 'LAW', 'labels': ['Law', 'Law'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Named documents made into laws.
{ 'type': 'LANGUAGE', 'labels': ['Language', 'Lang'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Any named language.
{ 'type': 'DATE', 'labels': ['Date', 'Date'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Absolute or relative dates or periods.
{ 'type': 'TIME', 'labels': ['Time', 'Time'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Times smaller than a day.
{ 'type': 'PERCENT', 'labels': ['Percent', 'Perc'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Percentage, including ”%“.
{ 'type': 'MONEY', 'labels': ['Money', 'Money'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Monetary values, including unit.
{ 'type': 'QUANTITY', 'labels': ['Quantity', 'Quant'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Measurements, as of weight or distance.
{ 'type': 'ORDINAL', 'labels': ['Ordinal', 'Ord'], 'bgColor': entity_color, 'borderColor': 'darken' }, # “first”, “second”, etc.
{ 'type': 'CARDINAL', 'labels': ['Cardinal', 'Card'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Numerals that do not fall under another type.
{ 'type': 'MISC', 'labels': ['Miscellaneus', 'Mix'], 'bgColor': entity_color, 'borderColor': 'darken' }, # Numerals that do not fall under another type.
],
'relation_types': [
{ 'type': 'acl', 'labels': ['adjectival clause', 'acl'], 'color': dependency_color},
{ 'type': 'advcl', 'labels': ['adverbial clause modifier', 'advcl'], 'color': dependency_color},
{ 'type': 'advmod', 'labels': ['adverbial modifier', 'advmod'], 'color': dependency_color},
{ 'type': 'amod', 'labels': ['adjectival modifier', 'amod'], 'color': dependency_color},
{ 'type': 'appos', 'labels': ['appositional modifier', 'appos'], 'color': dependency_color},
{ 'type': 'aux', 'labels': ['auxiliary', 'aux'], 'color': dependency_color},
{ 'type': 'case', 'labels': ['case marking', 'case'], 'color': dependency_color},
{ 'type': 'cc', 'labels': ['coordinating conjunction', 'cc'], 'color': dependency_color},
{ 'type': 'ccomp', 'labels': ['clausal complement', 'ccomp'], 'color': dependency_color},
{ 'type': 'clf', 'labels': ['classifier', 'clf'], 'color': dependency_color},
{ 'type': 'compound', 'labels': ['compound', 'compound'], 'color': dependency_color},
{ 'type': 'conj', 'labels': ['conjunct', 'conj'], 'color': dependency_color},
{ 'type': 'cop', 'labels': ['copula', 'cop'], 'color': dependency_color},
{ 'type': 'csubj', 'labels': ['clausal subject', 'csubj'], 'color': dependency_color},
{ 'type': 'dep', 'labels': ['unspecified dependency', 'dep'], 'color': dependency_color},
{ 'type': 'det', 'labels': ['determiner', 'det'], 'color': dependency_color},
{ 'type': 'discourse', 'labels': ['discourse element', 'discourse'], 'color': dependency_color},
{ 'type': 'dislocated', 'labels': ['dislocated elements', 'dislocated'], 'color': dependency_color},
{ 'type': 'expl', 'labels': ['expletive', 'expl'], 'color': dependency_color},
{ 'type': 'fixed', 'labels': ['fixed multiword expression', 'fixed'], 'color': dependency_color},
{ 'type': 'flat', 'labels': ['flat multiword expression', 'flat'], 'color': dependency_color},
{ 'type': 'goeswith', 'labels': ['goes with', 'goeswith'], 'color': dependency_color},
{ 'type': 'iobj', 'labels': ['indirect object', 'iobj'], 'color': dependency_color},
{ 'type': 'list', 'labels': ['list', 'list'], 'color': dependency_color},
{ 'type': 'mark', 'labels': ['marker', 'mark'], 'color': dependency_color},
{ 'type': 'nmod', 'labels': ['nominal modifier', 'nmod'], 'color': dependency_color},
{ 'type': 'nsubj', 'labels': ['nominal subject', 'nsubj'], 'color': dependency_color},
{ 'type': 'nummod', 'labels': ['numeric modifier', 'nummod'], 'color': dependency_color},
{ 'type': 'obj', 'labels': ['object', 'obj'], 'color': dependency_color},
{ 'type': 'obl', 'labels': ['oblique nominal', 'obl'], 'color': dependency_color},
{ 'type': 'orphan', 'labels': ['orphan', 'orphan'], 'color': dependency_color},
{ 'type': 'parataxis', 'labels': ['parataxis', 'parataxis'], 'color': dependency_color},
{ 'type': 'punct', 'labels': ['punctuation', 'punct'], 'color': dependency_color},
{ 'type': 'reparandum', 'labels': ['overridden disfluency', 'reparandum'], 'color': dependency_color},
{ 'type': 'root', 'labels': ['root', 'root'], 'color': dependency_color},
{ 'type': 'vocative', 'labels': ['vocative', 'vocative'], 'color': dependency_color},
{ 'type': 'xcomp', 'labels': ['open clausal complement', 'xcomp'], 'color': dependency_color},
# ENGLISH
# acl clausal modifier of noun (adjectival clause)
{ 'type': 'acomp', 'labels': ['adjectival complement', 'acomp'], 'color': dependency_color},
# advcl adverbial clause modifier
# advmod adverbial modifier
{ 'type': 'agent', 'labels': ['agent', 'agent'], 'color': dependency_color},
# amod adjectival modifier
# appos appositional modifier
{ 'type': 'attr', 'labels': ['attribute', 'attr'], 'color': dependency_color},
# aux auxiliary
{ 'type': 'auxpass', 'labels': ['auxiliary (passive)', 'auxpass'], 'color': dependency_color},
# case case marking
# cc coordinating conjunction
# ccomp clausal complement
# compound compound
# conj conjunct
# cop copula
# csubj clausal subject
{ 'type': 'csubjpass', 'labels': ['clausal subject (passive)', 'csubjpass'], 'color': dependency_color},
{ 'type': 'dative', 'labels': ['dative', 'dative'], 'color': dependency_color},
# dep unclassified dependent
# det determiner
# dobj direct object
# expl expletive
{ 'type': 'intj', 'labels': ['interjection', 'intj'], 'color': dependency_color},
# mark marker
{ 'type': 'meta', 'labels': ['meta modifier', 'meta'], 'color': dependency_color},
{ 'type': 'neg', 'labels': ['negation modifier', 'neg'], 'color': dependency_color},
{ 'type': 'nn', 'labels': ['noun compound modifier', 'nn'], 'color': dependency_color},
{ 'type': 'nounmod', 'labels': ['modifier of nominal', 'nounmod'], 'color': dependency_color},
{ 'type': 'npmod', 'labels': ['noun phrase as adverbial modifier', 'npmod'], 'color': dependency_color},
# nsubj nominal subject
{ 'type': 'nsubjpass', 'labels': ['nominal subject (passive)', 'nsubjpass'], 'color': dependency_color},
# nummod numeric modifier
{ 'type': 'oprd', 'labels': ['object predicate', 'oprd'], 'color': dependency_color},
# obj object
# obl oblique nominal
# parataxis parataxis
{ 'type': 'pcomp', 'labels': ['complement of preposition', 'pcomp'], 'color': dependency_color},
{ 'type': 'pobj', 'labels': ['object of preposition', 'pobj'], 'color': dependency_color},
{ 'type': 'poss', 'labels': ['possession modifier', 'poss'], 'color': dependency_color},
{ 'type': 'preconj', 'labels': ['pre-correlative conjunction', 'preconj'], 'color': dependency_color},
{ 'type': 'prep', 'labels': ['prepositional modifier', 'prep'], 'color': dependency_color},
{ 'type': 'prt', 'labels': ['particle', 'prt'], 'color': dependency_color},
# punct punctuation
{ 'type': 'quantmod', 'labels': ['modifier of quantifier', 'punctuation'], 'color': dependency_color},
{ 'type': 'relcl', 'labels': ['relative clause modifier', 'relcl'], 'color': dependency_color},
# root root
# xcomp open clausal complement
],
}
"""
collData = {
'entity_types': [
# The labels are used when displaying the annotation, in this case
# for "Person" we also provide a short-hand "Per" for cases where
# abbreviations are preferable
{
'type' : 'Person',
'labels' : ['Person', 'Per'],
'bgColor': 'royalblue',
'borderColor': 'darken'
}
],
'relation_types': [
# A relation takes two arguments, both are named and can be constrained
# as to which types they may apply to
# dashArray allows you to adjust the style of the relation arc
{ 'type': 'Anaphora', 'labels': ['Anaphora', 'Ana'], 'dashArray': '3,3', 'color': 'purple',
'args': [
{'role': 'Anaphor', 'targets': ['Person'] },
{'role': 'Entity', 'targets': ['Person'] },]
}
],
}
"""
docData = {
# This example (from https://brat.nlplab.org/embed.html) was kept here just for reference
'text' : "Ed O'Kelley was the man who shot the man who shot Jesse James.",
# The entities entry holds all entity annotations
'entities' : [
# Format: [${ID}, ${TYPE}, [[${START}, ${END}]]]
# note that range of the offsets are [${START},${END})
['T1', 'Person', [[0, 11]]],
['T2', 'Person', [[20, 23]]],
['T3', 'Person', [[37, 40]]],
['T4', 'Person', [[50, 61]]]
],
'relations': [
# Format: [${ID}, ${TYPE}, [[${ARGNAME}, ${TARGET}], [${ARGNAME}, ${TARGET}]]]
['R1', 'Anaphora', [['Anaphor', 'T2'], ['Entity', 'T1']]]
],
};
def count_word_syllables(word, language_code):
n_chars = len(word)
word = word + ' '
n_syllables = 0
if language_code == 'en': # see: https://medium.com/@mholtzscher/programmatically-counting-syllables-ca760435fab4
vowels = 'aeiouy'
if word[0] in vowels:
n_syllables += 1
for index in range(1, n_chars):
if word[index] in vowels and word[index - 1] not in vowels:
n_syllables += 1
        # the padding space appended above defeats endswith(); test the bare word
        if word[:n_chars].endswith('e'):
            n_syllables -= 1
        if word[:n_chars].endswith('le') and n_chars > 2 and word[n_chars - 3] not in vowels:
            n_syllables += 1
if n_syllables == 0:
n_syllables = 1
elif language_code == 'it': # see: https://it.comp.programmare.narkive.com/TExPlcuC/programma-di-sillabazione
vowels = 'aeiouy'
hard_cons = 'bcdfgjpqstvwxz'
liquid_cons = 'hlmnr'
cons = hard_cons + liquid_cons
if word[0] in vowels:
n_syllables += 1
for index in range(1, n_chars):
c = word[index]
if c in cons:
if word[index - 1] == c:
n_syllables += 1
elif c == 's':
pass
elif c in liquid_cons and word[index + 1] in cons and word[index + 2] in vowels:
n_syllables += 1
elif c in liquid_cons and word[index + 1] in liquid_cons and word[index + 2] in vowels:
n_syllables += 1
else:
if c == 's':
n_syllables += 1
elif word[index + 1] in hard_cons and (word[index + 2] in vowels or word[index + 2] in liquid_cons):
n_syllables += 1
elif word[index + 1] in liquid_cons and word[index + 2] in vowels:
n_syllables += 1
elif index == n_chars-1:
n_syllables += 1
elif language_code == 'es':
from commons.lang.es.utils import silabizer as es_syllabizer
syllabizer = es_syllabizer()
syllables = syllabizer(word)
n_syllables = len(syllables) - 1
elif language_code == 'el':
from commons.lang.el.utils import count_word_syllables as count_word_syllables_el
n_syllables = count_word_syllables_el(word)
else:
n_syllables = n_chars/2
return max(1, int(n_syllables))
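# Small self-contained sketch (never called) of how the heuristic above behaves;
# the expected counts are illustrative spot checks only.
def _example_count_word_syllables():
    assert count_word_syllables('hello', 'en') == 2
    assert count_word_syllables('strength', 'en') == 1
    # unknown language codes fall back to roughly half the character count
    assert count_word_syllables('word', 'xx') == 2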
obj_type_to_class_dict = {
'project': Project,
'oer': OER,
'lp': LearningPath,
'pathnode': PathNode,
'doc': Document,
'flatpage': FlatPage,
}
def get_web_resource_text(url):
    err = None
    response = None
    try:
        response = requests.get(url)
    except (ConnectionError, requests.exceptions.RequestException) as err:
        # the request failed before any response object existed
        return '', response, err
if not (response.status_code == 200):
return '', response, err
text = ''
encoding = 'utf8'
content_type = response.headers['content-type']
if content_type.count('text/plain'):
text = response.text
elif content_type.count('text/html') or url.endswith('.htm'):
text = response.text
text = readability.Document(text).summary()
text = extract_annotate_with_bs4(text)
else:
with tempfile.NamedTemporaryFile(dir='/tmp', mode='w+b') as f:
for chunk in response.iter_content(1024):
f.write(chunk)
if content_type.count('pdf'):
text = textract.process(f.name, encoding=encoding, extension='pdf')
elif content_type.count('rtf'):
text = textract.process(f.name, encoding=encoding, extension='rtf')
elif content_type.count('msword'):
text = textract.process(f.name, encoding=encoding, extension='doc')
elif content_type.count('officedocument.wordprocessingml') and content_type.count('document'):
text = textract.process(f.name, encoding=encoding, extension='docx')
elif content_type.count('officedocument.presentationml'):
text = textract.process(f.name, encoding=encoding, extension='pptx')
elif content_type.count('officedocument.spreadsheetml'):
text = textract.process(f.name, encoding=encoding, extension='xlsx')
f.close()
try:
text = text.decode()
except (UnicodeDecodeError, AttributeError) as err:
return '', response, err
return text, response, err
def get_document_text(document, return_has_text=False):
has_text = False
text = ''
version = document.latest_version
mimetype = version.mimetype
encoding = 'utf8'
if mimetype.count('text'): # if mimetype.endswith('text'):
has_text = True
if mimetype.count('text/plain'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='txt')
elif mimetype.count('pdf'): # elif mimetype.endswith('pdf'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='pdf')
elif mimetype.count('rtf'): # elif mimetype.endswith('rtf'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='rtf')
elif mimetype.count('msword'): # elif mimetype.endswith('msword'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='doc')
elif mimetype.count('officedocument.wordprocessingml') and mimetype.count('document'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='docx')
elif mimetype.count('officedocument.presentationml'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='pptx')
elif mimetype.count('officedocument.spreadsheetml'):
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension='xlsx')
else:
split_label = document.label.split('.')
if len(split_label) > 1:
extension = split_label[-1]
if extension in ['csv', 'doc', 'docx', 'eml', 'epub', 'htm', 'html', 'json', 'msg', 'odt', 'pdf', 'pptx', 'ps', 'rtf', 'txt', 'xslx', 'xss',]:
has_text = True
if not return_has_text:
text = textract.process(version.file.path, encoding=encoding, extension=extension)
if return_has_text:
return has_text
else:
try:
text = text.decode()
except (UnicodeDecodeError, AttributeError):
pass
return text
def get_oer_text(oer, return_has_text=False):
text = ''
if oer.url:
try:
response = requests.get(oer.url)
if response.status_code == 200 and response.headers['content-type'].count('text'):
text = response.text
if not return_has_text:
text = readability.Document(text).summary()
except:
text = ''
elif oer.text:
text = oer.text
else:
documents = oer.get_sorted_documents()
if documents:
text = get_document_text(documents[0], return_has_text=return_has_text)
return text
def extract_annotate_with_bs4(html):
soup = BeautifulSoup(html, 'lxml')
headings = soup.find_all(re.compile('h.+'))
for heading in headings:
name = heading.name
level = name[1:]
if level.isdigit():
text = heading.text
if text and not text[-1] in string.punctuation:
heading.append('.')
lis = soup.find_all('li')
for li in lis:
text = li.text
if text:
if not text[-1] in string.punctuation:
li.append(';')
return soup.get_text()
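# Illustrative helper (unused) showing the intent of the annotation above:
# headings gain a trailing '.' and list items a ';' before the HTML is flattened,
# so downstream sentence splitting still sees the boundaries the markup implied.
def _example_extract_annotate():
    html = '<h2>Overview</h2><ul><li>first point</li><li>second point</li></ul>'
    # -> 'Overview.first point;second point;' (modulo whitespace)
    return extract_annotate_with_bs4(html)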
def get_obj_text(obj, obj_type=None, obj_id=None, return_has_text=True, with_children=True):
# if obj:
if obj and not obj_type:
if isinstance(obj, Project):
obj_type = 'project'
elif isinstance(obj, OER):
obj_type = 'oer'
elif isinstance(obj, LearningPath):
obj_type = 'lp'
elif isinstance(obj, PathNode):
obj_type = 'pathnode'
elif isinstance(obj, Document):
obj_type = 'doc'
elif isinstance(obj, FlatPage):
obj_type = 'flatpage'
text = ''
if obj_type == 'project':
if not obj:
obj = get_object_or_404(Project, id=obj_id)
json_metadata = ProjectSerializer(obj).data
title = json_metadata['name']
description = json_metadata['description']
text = json_metadata['info']
elif obj_type == 'oer':
if not obj:
obj = get_object_or_404(OER, id=obj_id)
text = get_oer_text(obj, return_has_text=return_has_text)
if not return_has_text:
text = extract_annotate_with_bs4(text)
json_metadata = OerSerializer(obj).data
title = json_metadata['title']
description = json_metadata['description']
elif obj_type == 'lp':
if not obj:
obj = get_object_or_404(LearningPath, id=obj_id)
json_metadata = LearningPathSerializer(obj).data
title = json_metadata['title']
description = json_metadata['short']
lp_text = json_metadata['long']
if with_children:
nodes = obj.get_ordered_nodes()
for node in nodes:
                # use node-local names so the learning path's own title and
                # description are not overwritten by the last node's values
                node_title, node_description, node_text = node.get_obj_text(return_has_text=False)
                lp_text += '{}, {}. {}'.format(node_title, node_title, node_text)
text = lp_text
elif obj_type == 'pathnode':
if not obj:
obj = get_object_or_404(PathNode, id=obj_id)
json_metadata = PathNodeSerializer(obj).data
title = json_metadata['label']
description = ''
oer = obj.oer
if oer:
text = get_oer_text(oer, return_has_text=return_has_text)
else:
document = obj.document
if document:
text = get_document_text(document, return_has_text=return_has_text)
else:
text = json_metadata['text']
if text and not return_has_text:
text = extract_annotate_with_bs4(text)
elif obj_type == 'doc':
if not obj:
obj = get_object_or_404(Document, id=obj_id)
title = obj.label
description = ''
text = get_document_text(obj)
elif obj_type == 'flatpage':
if not obj:
obj = get_object_or_404(FlatPage, id=obj_id)
title = obj.title
description = ""
text = extract_annotate_with_bs4(obj.content)
if return_has_text:
return text
else:
return title, description, text
PathNode.get_obj_text = get_obj_text
def index_sentences(sentences, tokens):
i = 0
for sentence in sentences:
assert sentence['start']==tokens[i]['start']
end = sentence['end']
sentence['start_token'] = i
while tokens[i]['end'] < end:
i += 1
sentence['end_token'] = i
i += 1
def make_sentence_tree(sentence, tokens):
i_root = None
i = sentence['start_token']
if hasattr(sentence, 'root'):
root = sentence.root
else:
root = None
text = ''
while i <= sentence['end_token']:
token = tokens[i]
text += token['text']
dep = token['dep']
# if i_root is None and dep=='ROOT':
# see: https://github.com/explosion/spaCy/issues/10003
# see: https://stackoverflow.com/questions/36610179/how-to-get-the-dependency-tree-with-spacy
if i_root is None and (dep=='ROOT' or dep=='dep' or i_root==root):
i_root = sentence['root'] = i
elif dep:
head = tokens[token['head']]
if not head.get('children', []):
head['children'] = []
head['children'].append(i)
i += 1
assert i_root is not None
sentence['root'] = i_root
sentence['text'] = text
return i-sentence['start_token']
def token_dependency_depth(token, depth, tokens):
max_depth = depth
for i in token.get('children', []):
max_depth = max(max_depth, 1+token_dependency_depth(tokens[i], depth, tokens))
return max_depth
def sentence_dependency_depth(sentence, tokens):
root = tokens[sentence['root']]
return token_dependency_depth(root, 0, tokens)
def token_dependency_distance(token, max_distance, tokens):
i_token = token['id']
for i in token.get('children', []):
max_distance = max(max_distance, abs(i-i_token), token_dependency_distance(tokens[i], max_distance, tokens))
return max_distance
def sentence_dependency_distance(sentence, tokens):
root = tokens[sentence['root']]
return token_dependency_distance(root, 0, tokens)
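# Illustrative sketch (hypothetical data, not produced by the NLP service): a minimal
# three-token sentence where token 1 is the root and tokens 0 and 2 are its children.
# Only the keys that the two metrics above actually read ('id', 'children', 'root')
# are filled in.
def _example_dependency_metrics():
    tokens = [
        {'id': 0, 'children': []},      # e.g. "She"
        {'id': 1, 'children': [0, 2]},  # e.g. "reads" (root)
        {'id': 2, 'children': []},      # e.g. "books"
    ]
    sentence = {'root': 1}
    depth = sentence_dependency_depth(sentence, tokens)        # 1: root -> child
    distance = sentence_dependency_distance(sentence, tokens)  # 1: |child id - root id|
    return depth, distance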
def index_entities(ents, tokens, entity_dict):
i = 0
for ent in ents:
label = ent['label']
start = ent['start']
end = ent['end']
while tokens[i]['start'] < start:
i += 1
assert start==tokens[i]['start']
text = ''
try: # don't know why in one case the condition below raised exception
while tokens[i]['end'] <= end:
text += tokens[i]['text']
i += 1
except:
pass
ent['text'] = text
if not '_' in text and not text in entity_dict[label]:
entity_dict[label].append(text)
def add_to_default_dict(default_dict, token, case_dict=None):
if (len(token)>1 and token.isupper()) or token.islower():
default_dict[token] +=1
elif default_dict.get(token.lower(), ''):
default_dict[token.lower()] +=1
else:
default_dict[token] +=1
def sorted_frequencies(d):
sd = OrderedDict(sorted(d.items(), key = itemgetter(1), reverse = True))
return [{'key': key, 'freq': freq} for key, freq in sd.items()]
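# For instance (hypothetical counts), sorted_frequencies({'dog': 5, 'cat': 3}) returns
# [{'key': 'dog', 'freq': 5}, {'key': 'cat', 'freq': 3}], i.e. descending by frequency.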
# token_level_dict = {}
token_level_dict = defaultdict(lambda:'c2')
def map_token_pos_to_level(language_code):
module_name = 'commons.lang.{0}.basic_vocabulary_{0}'.format(language_code)
module = import_module(module_name)
voc = getattr(module, 'voc_'+language_code)
for item in voc:
assert len(item) >= 3
# token_level_dict['_'.join(item[:2])] = item[2]
key = '_'.join(item[:2])
token_level_dict[key] = min(item[2].lower(), token_level_dict[key])
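# Illustrative sketch (assumes a vocabulary entry shape like ('word', 'pos', 'level'),
# which is inferred from the code above rather than from the real basic_vocabulary
# modules): each entry becomes a 'word_pos' key mapped to the lowest (easiest) CEFR
# level seen for it, with 'c2' as the default for unknown keys.
def _example_token_level_mapping():
    sample_voc = [('book', 'noun', 'A1'), ('book', 'verb', 'B1')]
    levels = defaultdict(lambda: 'c2')
    for item in sample_voc:
        key = '_'.join(item[:2])
        levels[key] = min(item[2].lower(), levels[key])
    return dict(levels)  # {'book_noun': 'a1', 'book_verb': 'b1'}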
language_code_dict = {
'english': 'en',
'italian': 'it',
'italiano': 'it',
'spanish': 'es',
'español': 'es',
    'greek': 'el',
'ελληνικά': 'el',
}
off_error = _('sorry, it looks like the language processing service is off')
def add_level_to_frequencies(frequencies, pos):
for frequency in frequencies:
key = '_'.join([frequency['key'].lower(), pos])
level = token_level_dict.get(key, None)
if level:
frequency['level'] = level
frequency[level[0]] = True
elif frequency['key'].islower():
frequency['level'] = 'c2'
frequency['c'] = True
def text_dashboard_return(request, var_dict):
if not var_dict:
var_dict = { 'error': off_error }
if request.is_ajax():
return JsonResponse(var_dict)
else:
return var_dict # only for manual test
def text_dashboard(request, obj_type, obj_id, file_key='', obj=None, title='', body='', wordlists=False, readability=False, nounchunks=False):
""" here (originally only) through ajax call from the template 'vue/text_dashboard.html' """
if not file_key and not obj_type in ['project', 'oer', 'lp', 'pathnode', 'doc', 'flatpage', 'resource', 'text',]:
return HttpResponseForbidden()
if file_key:
pass
else:
if obj_type == 'text':
title, description, body = ['', '', request.session.get('text', '')]
elif obj_type == 'resource':
title = ''
description = ''
body, response, err = get_web_resource_text(obj_id)
if not body:
if err:
return text_dashboard_return(request, { 'error': err.value })
else:
return text_dashboard_return(request, { 'error': response.status_code })
else:
title, description, text = get_obj_text(obj, obj_type=obj_type, obj_id=obj_id, return_has_text=False)
body = '{}, {}. {}'.format(title, description, text)
data = json.dumps({'text': body})
endpoint = nlp_url + '/api/analyze'
try:
response = requests.post(endpoint, data=data)
except:
response = None
if not response or response.status_code!=200:
return text_dashboard_return(request, {})
analyze_dict = response.json()
language_code = analyze_dict['language']
language = Language.objects.get(code=language_code).name
map_token_pos_to_level(language_code)
analyzed_text = analyze_dict['text']
summary = analyze_dict['summary']
obj_type_label = obj_type_label_dict[obj_type]
var_dict = { 'obj_type': obj_type, 'obj_id': obj_id, 'description': description, 'title': title, 'obj_type_label': obj_type_label, 'language_code': language_code, 'language': language, 'text': body, 'analyzed_text': analyzed_text, 'summary': summary }
if nounchunks:
ncs = analyze_dict['noun_chunks']
noun_chunks = []
for nc in ncs:
nc = nc.replace('\n', ' ').replace('\xa0', ' ')
tokens = nc.split()
if len(tokens)>1:
noun_chunks.append(' '.join(tokens))
noun_chunks = [nc for nc in noun_chunks if len(nc.split())>1]
var_dict['noun_chunks'] = noun_chunks
text = analyze_dict['text']
sentences = analyze_dict['sents']
var_dict['n_sentences'] = n_sentences = len(sentences)
tokens = analyze_dict['tokens']
var_dict['n_tokens'] = n_tokens = len(tokens)
ents = analyze_dict.get('ents', [])
kw_frequencies = defaultdict(int)
adjective_frequencies = defaultdict(int)
noun_frequencies = defaultdict(int)
verb_frequencies = defaultdict(int)
adverb_frequencies = defaultdict(int)
n_lexical = 0
if readability:
n_words = 0
n_hard_words = 0
n_word_characters = 0
n_word_syllables = 0
for item in tokens:
token = text[item['start']:item['end']]
item['text'] = token
pos = item['pos']
if readability: # and not pos in ['SPACE', 'PUNCT',]:
n_words += 1
word_characters = len(token)
n_word_characters += word_characters
word_syllables = count_word_syllables(token, language_code)
n_word_syllables += word_syllables
if word_syllables > 2:
n_hard_words += 1
lemma = item['lemma']
if token.isnumeric() or pos in EMPTY_POS or item['stop']:
continue
n_lexical += 1
add_to_default_dict(kw_frequencies, token)
if pos in ['NOUN', 'PROPN']:
add_to_default_dict(noun_frequencies, lemma)
elif pos == 'VERB':
add_to_default_dict(verb_frequencies, lemma)
elif pos == 'ADJ':
add_to_default_dict(adjective_frequencies, lemma)
elif wordlists and pos == 'ADV':
add_to_default_dict(adverb_frequencies, lemma)
if readability:
var_dict['n_words'] = n_words
var_dict['n_hard_words'] = n_hard_words
var_dict['n_word_characters'] = n_word_characters
var_dict['n_word_syllables'] = n_word_syllables
n_unique = len(kw_frequencies)
voc_density = n_tokens and n_unique/n_tokens or 0
lex_density = n_tokens and n_lexical/n_tokens or 0
kw_frequencies = sorted_frequencies(kw_frequencies)
verb_frequencies = sorted_frequencies(verb_frequencies)
noun_frequencies = sorted_frequencies(noun_frequencies)
adjective_frequencies = sorted_frequencies(adjective_frequencies)
adverb_frequencies = sorted_frequencies(adverb_frequencies)
if token_level_dict:
add_level_to_frequencies(verb_frequencies, 'verb')
add_level_to_frequencies(noun_frequencies, 'noun')
add_level_to_frequencies(adjective_frequencies, 'adjective')
add_level_to_frequencies(adverb_frequencies, 'adverb')
var_dict.update({'verb_frequencies': verb_frequencies, 'noun_frequencies': noun_frequencies,
'adjective_frequencies': adjective_frequencies, 'adverb_frequencies': adverb_frequencies,})
if wordlists:
return var_dict
mean_sentence_length = n_tokens/n_sentences
index_sentences(sentences, tokens)
max_sentence_length = 0
max_dependency_depth = 0
tot_dependency_depth = 0
max_dependency_distance = 0
tot_dependency_distance = 0
max_weighted_distance = 0
tot_weighted_distance = 0
for sentence in sentences:
sentence_length = make_sentence_tree(sentence, tokens)
max_sentence_length = max(max_sentence_length, sentence_length)
depth = sentence_dependency_depth(sentence, tokens)
max_dependency_depth = max(max_dependency_depth, depth)
tot_dependency_depth += depth
distance = sentence_dependency_distance(sentence, tokens)
max_dependency_distance = max(max_dependency_distance, distance)
tot_dependency_distance += distance
weighted_distance = distance / sentence_length
max_weighted_distance = max(max_weighted_distance, weighted_distance)
tot_weighted_distance += weighted_distance
mean_dependency_depth = n_sentences and (tot_dependency_depth / n_sentences) or 0
mean_dependency_distance = n_sentences and (tot_dependency_distance / n_sentences) or 0
mean_weighted_distance = n_sentences and (tot_weighted_distance / n_sentences) or 0
    entity_dict = defaultdict(list)
    index_entities(ents, tokens, entity_dict)
    entity_lists = [{'key': key, 'entities': entities} for key, entities in entity_dict.items()]
var_dict.update({'n_unique': n_unique, 'voc_density': voc_density, 'lex_density': lex_density,
'mean_sentence_length': mean_sentence_length, 'max_sentence_length': max_sentence_length,
'max_dependency_depth': max_dependency_depth, 'mean_dependency_depth': mean_dependency_depth,
'max_dependency_distance': max_dependency_distance, 'mean_dependency_distance': mean_dependency_distance,
'max_weighted_distance': max_weighted_distance, 'mean_weighted_distance': mean_weighted_distance,
'sentences': sentences, 'tokens': tokens,
'kw_frequencies': kw_frequencies[:16],
'entity_lists': entity_lists, 'entities': ents,
'collData': collData, 'docData': docData,
})
return text_dashboard_return(request, var_dict)
"""
def project_text(request, project_id):
project = get_object_or_404(Project, id=project_id)
var_dict = {'obj_type': 'project', 'obj_id': project.id}
return render(request, 'vue/text_dashboard.html', var_dict)
def oer_text(request, oer_id):
oer = get_object_or_404(OER, id=oer_id)
var_dict = {'obj_type': 'oer', 'obj_id': oer.id}
return render(request, 'vue/text_dashboard.html', var_dict)
def lp_text(request, lp_id):
lp = get_object_or_404(LearningPath, id=lp_id)
var_dict = {'obj_type': 'lp', 'obj_id': lp.id}
return render(request, 'vue/text_dashboard.html', var_dict)
def pathnode_text(request, node_id):
pathnode = get_object_or_404(PathNode, id=node_id)
var_dict = {'obj_type': 'pathnode', 'obj_id': pathnode.id}
return render(request, 'vue/text_dashboard.html', var_dict)
def doc_text(request, doc_id):
document = get_object_or_404(Document, id=doc_id)
var_dict = {'obj_type': 'doc', 'obj_id': document.id}
return render(request, 'vue/text_dashboard.html', var_dict)
def flatpage_text(request, flatpage_id):
flatpage = get_object_or_404(FlatPage, id=flatpage_id)
var_dict = {'obj_type': 'flatpage', 'obj_id': flatpage.id}
return render(request, 'vue/text_dashboard.html', var_dict)
"""
def brat(request):
return render(request, 'vue/brat.html', {})
def lp_compare_nodes(request, lp_slug):
    if str(lp_slug).isdigit():
lp = get_object_or_404(LearningPath, id=lp_slug)
else:
lp = get_object_or_404(LearningPath, slug=lp_slug)
nodes = lp.get_ordered_nodes()
user_key = '{id:05d}'.format(id=request.user.id)
endpoint = nlp_url + '/api/delete_corpus/'
data = json.dumps({'user_key': user_key})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
data = {'status': response.status_code}
return JsonResponse(data)
endpoint = nlp_url + '/api/add_doc/'
for node in nodes:
title, description, text = node.get_obj_text(return_has_text=False)
text = '{}, {}. {}'.format(title, title, text)
doc_key = '{id:05d}'.format(id=node.id)
data = json.dumps({'user_key': user_key, 'doc_key': doc_key, 'text': text})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
data = {'status': response.status_code}
return JsonResponse(data)
endpoint = nlp_url + '/api/compare_docs/'
data = json.dumps({'user_key': user_key, 'language': lp.original_language})
response = requests.post(endpoint, data=data)
if response and response.status_code==200:
data = response.json()
return JsonResponse(data)
else:
data = {'status': response.status_code}
return JsonResponse(data)
TEXT_MIMETYPE_KEYS = (
'text',
'pdf',
'rtf',
'msword',
'wordprocessingml.document',
'officedocument.wordprocessingml',
)
# def contents_dashboard(request):
@csrf_exempt
def ajax_contents(request):
user = request.user
data = json.loads(request.body.decode('utf-8'))
project_id = data['project_id']
user_key = '{id:05d}'.format(id=request.user.id)
endpoint = nlp_url + '/api/get_corpora/'
data = json.dumps({'user_key': user_key})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
data = response.json()
corpora = data['corpora']
if project_id:
data = project_contents(project_id)
else: # if user.is_authenticated:
data = user_contents(user)
data['corpora'] = corpora
return JsonResponse(data)
def ajax_lp_nodes(request, lp_id):
lp = get_object_or_404(LearningPath, id=lp_id)
nodes = lp.get_ordered_nodes()
data = {'nodes': [{'obj_id': node.id, 'label': node.label, 'url': node.get_absolute_url()} for node in nodes]}
return JsonResponse(data)
def propagate_remote_server_error(response):
ajax_response = JsonResponse({"error": "Remote server error"})
ajax_response.status_code = response.status_code
return ajax_response
@csrf_exempt
def ajax_new_corpus(request):
user_key = '{id:05d}'.format(id=request.user.id)
endpoint = nlp_url + '/api/new_corpus/'
data = json.dumps({'user_key': user_key})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
data = response.json()
file_key = data['file_key']
result = {'file_key': file_key}
return JsonResponse(result)
@csrf_exempt
def ajax_insert_item(request):
data = json.loads(request.body.decode('utf-8'))
file_key = data['file_key']
index = data['index']
item = data['item']
obj_type = item['obj_type']
obj_id = item['obj_id']
url = item['url']
title, description, text = get_obj_text(None, obj_type=obj_type, obj_id=obj_id, return_has_text=False, with_children=True)
text = ". ".join([title, description, text])
data = json.dumps({'file_key': file_key, 'index': index, 'obj_type': obj_type, 'obj_id': obj_id, 'label': title, 'url': url, 'text': text})
endpoint = nlp_url + '/api/add_doc/'
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
data = response.json()
file_key = data['file_key']
if file_key:
result = {'file_key': file_key, 'index': index, 'language': data['language'], 'n_tokens': data['n_tokens'], 'n_words': data['n_words']}
else:
result = {'file_key': file_key, 'error': 'languages cannot be mixed in corpus'}
return JsonResponse(result)
"""
called from contents_dashboard template to remove an item (doc) from a corpus (docbin)
"""
@csrf_exempt
def ajax_remove_item(request):
endpoint = nlp_url + '/api/remove_doc/'
data = json.loads(request.body.decode('utf-8'))
file_key = data['file_key']
obj_type = data['obj_type']
obj_id = data['obj_id']
data = json.dumps({'file_key': file_key, 'obj_type': obj_type, 'obj_id': obj_id})
response = requests.post(endpoint, data=data)
if response.status_code==200:
data = response.json()
index = data['index']
result = {'index': index}
return JsonResponse(result)
else:
return propagate_remote_server_error(response)
"""
called from contents_dashboard template to make a corpus of a list of resources
and return summary information on the application of the spaCy pipeline
"""
@csrf_exempt
def ajax_make_corpus(request):
data = json.loads(request.body.decode('utf-8'))
resources = data['items']
user_key = '{id:05d}'.format(id=request.user.id)
file_key = ''
endpoint = nlp_url + '/api/add_doc/'
processed = []
for resource in resources:
obj_type = resource['obj_type']
obj_id = resource['obj_id']
url = resource['url']
title, description, text = get_obj_text(None, obj_type=obj_type, obj_id=obj_id, return_has_text=False, with_children=True)
text = ". ".join([title, description, text])
data = json.dumps({'file_key': file_key, 'user_key': user_key, 'obj_type': obj_type, 'obj_id': obj_id, 'label': title, 'url': url, 'text': text})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
data = response.json()
file_key = data['file_key']
data.update({'obj_type': obj_type, 'obj_id': obj_id, 'label': title})
processed.append(data)
return JsonResponse({'result': processed, 'file_key': file_key})
"""
called from contents_dashboard template
to list corpora associated to a user or a project
"""
@csrf_exempt
def ajax_get_corpora(request):
data = json.loads(request.body.decode('utf-8'))
project_key = data.get('project_key', '')
if not project_key:
user_key = '{id:05d}'.format(id=request.user.id)
endpoint = nlp_url + '/api/get_corpora/'
data = json.dumps({'user_key': user_key, 'project_key': project_key})
response = requests.post(endpoint, data=data)
if response.status_code==200:
corpora = response.json()
return JsonResponse(corpora)
else:
return propagate_remote_server_error(response)
"""
called from contents_dashboard template
to compare the texts of a list of resources
"""
@csrf_exempt
def ajax_compare_resources(request):
data = json.loads(request.body.decode('utf-8'))
resources = data['items']
n = len(resources)
if n == 0 or (n == 1 and resources[0]['obj_type'] != 'lp'):
ajax_response = JsonResponse({"error": "Need at least 2 items"})
ajax_response.status_code = 404
return ajax_response
elif n == 1:
return lp_compare_nodes(request, resources[0]['obj_id'])
else:
user_key = '{id:05d}'.format(id=request.user.id)
endpoint = nlp_url + '/api/delete_corpus/'
data = json.dumps({'user_key': user_key})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
endpoint = nlp_url + '/api/add_doc/'
last_language = None
for resource in resources:
title, description, text = get_obj_text(None, obj_type=resource['obj_type'], obj_id=resource['obj_id'], return_has_text=False, with_children=True)
text = '{}, {}. {}'.format(title, title, text)
doc_key = '{id:05d}'.format(id=resource['obj_id'])
data = json.dumps({'user_key': user_key, 'doc_key': doc_key, 'text': text})
response = requests.post(endpoint, data=data)
if not response.status_code==200:
return propagate_remote_server_error(response)
data = response.json()
language = data.get('language', '')
if last_language and language!=last_language:
ajax_response = JsonResponse({"error": "All items must have same language"})
ajax_response.status_code = 404
return ajax_response
last_language = language
endpoint = nlp_url + '/api/compare_docs/'
data = json.dumps({'user_key': user_key, 'language': language})
response = requests.post(endpoint, data=data)
if response.status_code==200:
result = response.json()
return JsonResponse(result)
else:
return propagate_remote_server_error(response)
"""
called from contents_dashboard template
to delete an entire corpus (docbin)
"""
@csrf_exempt
def ajax_delete_corpus(request):
endpoint = nlp_url + '/api/delete_corpus/'
data = json.loads(request.body.decode('utf-8'))
file_key = data['file_key']
data = json.dumps({'file_key': file_key})
response = requests.post(endpoint, data=data)
if response.status_code==200:
result = response.json()
file_key = result['file_key']
data = {'file_key': file_key}
return JsonResponse(data)
else:
return propagate_remote_server_error(response)
@csrf_exempt
def text_wordlists(request, file_key='', obj_type='', obj_id=''):
var_dict = {'file_key': file_key, 'obj_type': obj_type, 'obj_id': obj_id}
if request.is_ajax():
keys = ['verb_frequencies', 'noun_frequencies', 'adjective_frequencies', 'adverb_frequencies',
'obj_type_label', 'title', 'language']
data = var_dict
dashboard_dict = text_dashboard(request, file_key=file_key, obj_type=obj_type, obj_id=obj_id, wordlists=True)
data.update([[key, dashboard_dict[key]] for key in keys])
return JsonResponse(data)
else:
return render(request, 'vue/text_wordlists.html', var_dict)
"""
called from contents_dashboard or text_analysis template
to find and sort document or corpus keywords and to list keyword in context
"""
@csrf_exempt
def context_dashboard(request, file_key='', obj_type='', obj_id=''):
var_dict = {'file_key': file_key, 'obj_type': obj_type, 'obj_id': obj_id}
if request.is_ajax():
if not file_key:
var_dict['text'] = request.session.get('text', '')
endpoint = nlp_url + '/api/word_contexts/'
data = json.dumps(var_dict)
response = requests.post(endpoint, data=data)
result = response.json()
var_dict['language'] = result['language']
var_dict['keywords'] = result['keywords']
var_dict['kwics'] = result['kwics']
return JsonResponse(var_dict)
else:
return render(request, 'vue/context_dashboard.html', var_dict)
def text_summarization(request, params={}):
var_dict = params
text = request.session.get('text', '')
data = json.dumps({'text': text})
endpoint = nlp_url + '/api/analyze'
try:
response = requests.post(endpoint, data=data)
except:
response = None
if response and response.status_code == 200:
analyze_dict = response.json()
var_dict['language'] = analyze_dict['language']
var_dict['text'] = text
var_dict['summary'] = analyze_dict['summary']
else:
var_dict['error'] = off_error
return render(request, 'text_summarization.html', var_dict)
readability_indexes = {
'flesch_easy': { 'languages': ['en'], 'title': "Flesch Reading Ease score for English (0-100)", 'ref': 'https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests' },
'franchina_vacca_1972': { 'languages': ['it'], 'title': "Franchina-Vacca readability index for Italian (0-100)", 'ref': 'https://it.wikipedia.org/wiki/Formula_di_Flesch' },
'gulp_ease': { 'languages': ['it'], 'title': "GULP readability index for Italian (0-100)", 'ref': 'https://it.wikipedia.org/wiki/Indice_Gulpease' },
'kincaid_flesh': { 'languages': ['en'], 'title': "Flesch–Kincaid grade level for English (Very easy-Extra difficult)", 'ref': 'https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests' },
'fernandez_huerta': { 'languages': ['es'], 'title': "Fernandez Huerta readability index for Spanish (0-100)", 'ref': 'https://legible.es/blog/lecturabilidad-fernandez-huerta/' },
'gagatsis_1985': { 'languages': ['el'], 'title': "Gagatsis readability index for Greek (0-100)", 'ref': 'http://www.sfs.uni-tuebingen.de/~dm/papers/Georgatou-16.pdf' },
}
# gagatsis_1985. see: http://www.sfs.uni-tuebingen.de/~dm/papers/Georgatou-16.pdf
readability_scales = {
'flesch_easy': [[90, 100, 'very easy'], [80, 90, 'easy'], [70, 80, 'fairly easy'], [60, 70, 'intermediate'], [50, 60, 'fairly difficult'], [30, 50, 'difficult'], [0, 30, 'very difficult'],],
'kincaid_flesh': [[90, 100, '5th grade'], [80, 90, '6th grade'], [70, 80, '7th grade'], [60, 70, '8-9th grade'], [50, 60, '10-12 grade'], [30, 50, 'college'], [10, 30, 'college graduate'], [0, 10, 'professional'],]
}
def readability_level(scale, score):
score = int(score)
scale = readability_scales[scale]
    for bounds in scale:
        if bounds[0] <= score <= bounds[1]:
            return bounds[2]
return 'out of scale'
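# Worked example (hypothetical inputs): with a mean sentence length of 15 tokens and
# 1.5 syllables per word, the Flesch Reading Ease formula used in text_readability
# below gives 206.835 - 1.015*15 - 84.6*1.5 = 64.71, which readability_level maps
# to the 'intermediate' band of the flesch_easy scale.
def _example_flesch_reading_ease(mean_sentence_length=15.0, mean_syllables_per_word=1.5):
    score = 206.835 - 1.015 * mean_sentence_length - 84.6 * mean_syllables_per_word
    return score, readability_level('flesch_easy', score)  # (64.71, 'intermediate')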
def text_readability(request, params={}):
var_dict = text_dashboard(request, 'text', 0, readability=True)
error = var_dict.get('error', None)
if error:
print('error:', error)
else:
var_dict.update(params)
language_code = var_dict['language_code']
n_words = var_dict['n_words'] or 1
var_dict['mean_chars_per_word'] = var_dict['n_word_characters'] / n_words
var_dict['mean_syllables_per_word'] = var_dict['n_word_syllables'] / n_words
var_dict['readability_indexes'] = {}
index = readability_indexes['flesch_easy']
if language_code in index['languages']:
index['value'] = 206.835 - 1.015 * var_dict['mean_sentence_length'] - 84.6 * var_dict['mean_syllables_per_word']
index['range'] = readability_level('flesch_easy', index['value'])
var_dict['readability_indexes']['flesch_easy'] = index
index = readability_indexes['kincaid_flesh']
if language_code in index['languages']:
index['value'] = 0.39 * var_dict['mean_sentence_length'] + 11.8 * var_dict['mean_syllables_per_word'] - 15.59
index['range'] = readability_level('kincaid_flesh', index['value'])
var_dict['readability_indexes']['kincaid_flesh'] = index
index = readability_indexes['franchina_vacca_1972']
if language_code in index['languages']:
index['value'] = 206 - var_dict['mean_sentence_length'] - 65 * var_dict['mean_syllables_per_word']
index['range'] = readability_level('flesch_easy', index['value'])
var_dict['readability_indexes']['franchina_vacca_1972'] = index
index = readability_indexes['gulp_ease']
if language_code in index['languages']:
index['value'] = 89 - 10 * var_dict['mean_chars_per_word'] + 100 * var_dict['n_sentences'] / n_words
index['range'] = readability_level('flesch_easy', index['value'])
var_dict['readability_indexes']['gulp_ease'] = index
index = readability_indexes['fernandez_huerta']
if language_code in index['languages']:
index['value'] = 206.84 - 1.02 * var_dict['mean_sentence_length'] - 60 * var_dict['mean_syllables_per_word']
index['range'] = readability_level('flesch_easy', index['value'])
        var_dict['readability_indexes']['fernandez_huerta'] = index
index = readability_indexes['gagatsis_1985']
if language_code in index['languages']:
index['value'] = 206.835 - 1.015 * var_dict['mean_sentence_length'] - 59 * var_dict['mean_syllables_per_word']
index['range'] = readability_level('flesch_easy', index['value'])
var_dict['readability_indexes']['gagatsis_1985'] = index
return render(request, 'text_readability.html', var_dict)
def text_analysis_input(request):
var_dict = {}
if request.POST:
form = TextAnalysisInputForm(request.POST)
if form.is_valid():
data = form.cleaned_data
function = data['function']
request.session['text'] = data['text']
            return text_analysis(request, function, 'text', 0)
else:
# do not present the input form if the language server is down
endpoint = nlp_url + '/api/configuration'
response = None
try:
response = requests.get(endpoint)
        except Exception:
            # the request failed (e.g. connection error), so there is no response to inspect
            pass
if response and response.status_code == 200:
var_dict = response.json()
form = TextAnalysisInputForm()
var_dict['form'] = form
else:
var_dict['error'] = off_error
return render(request, 'text_analysis_input.html', var_dict)
def text_analysis(request, function, obj_type, obj_id, file_key='', text=''):
var_dict = { 'obj_type': obj_type, 'obj_id': obj_id, 'file_key': file_key, 'title': '' }
if file_key:
if obj_type == 'corpus':
var_dict['obj_type'] = ''
else:
var_dict['obj_type_label'] = obj_type_label_dict[obj_type]
if obj_type == 'text':
var_dict['obj_id'] = 0
else:
model_class = obj_type_to_class_dict[obj_type]
obj = get_object_or_404(model_class, id=obj_id)
if function in ['context', 'summarization', 'readability']:
title, description, text = get_obj_text(obj, obj_type=obj_type, obj_id=obj_id, return_has_text=False)
request.session['text'] = '{}, {}. {}'.format(title, description, text)
var_dict['title'] = title
# var_dict['obj_type'] = 0
if function == 'dashboard':
return render(request, 'vue/text_dashboard.html', var_dict)
elif function == 'context':
var_dict['VUE'] = True
return render(request, 'vue/context_dashboard.html', var_dict)
elif function == 'summarization':
var_dict['VUE'] = True
return text_summarization(request, params=var_dict)
elif function == 'readability':
var_dict['VUE'] = True
return text_readability(request, params=var_dict)
elif function == 'wordlists':
var_dict['VUE'] = True
return render(request, 'vue/text_wordlists.html', var_dict)
| 46.775606
| 255
| 0.623859
|
79503d91aa391b2a666d7272020192061ad57717
| 28
|
py
|
Python
|
discord_interaction/abc/__init__.py
|
HazemMeqdad/discord-interaction
|
91a8807c09708e3b134c17c5bf255e8ca6c3fea7
|
[
"MIT"
] | 2
|
2021-12-09T23:01:18.000Z
|
2021-12-10T17:16:42.000Z
|
discord_interaction/abc/__init__.py
|
HazemMeqdad/discord-interaction
|
91a8807c09708e3b134c17c5bf255e8ca6c3fea7
|
[
"MIT"
] | null | null | null |
discord_interaction/abc/__init__.py
|
HazemMeqdad/discord-interaction
|
91a8807c09708e3b134c17c5bf255e8ca6c3fea7
|
[
"MIT"
] | null | null | null |
from .context import Context
| 28
| 28
| 0.857143
|
79503de20dacbec0d393737860b6e6ace488c039
| 7,058
|
py
|
Python
|
lib/IPython/core/hooks.py
|
sumanau7/Ele_CC_Sumanau
|
c0cdb3976bf02a09341393259908eafba3d1a345
|
[
"Apache-2.0"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
SLpackage/private/thirdparty/pythonpkgs/ipython/ipython_4.0.0/lib/python2.7/site-packages/IPython/core/hooks.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
SLpackage/private/thirdparty/pythonpkgs/ipython/ipython_4.0.0/lib/python2.7/site-packages/IPython/core/hooks.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
"""Hooks for IPython.
In Python, it is possible to overwrite any method of any object if you really
want to. But IPython exposes a few 'hooks', methods which are *designed* to
be overwritten by users for customization purposes. This module defines the
default versions of all such hooks, which get used by IPython if not
overridden by the user.
Hooks are simple functions, but they should be declared with ``self`` as their
first argument, because when activated they are registered into IPython as
instance methods. The self argument will be the IPython running instance
itself, so hooks have full access to the entire IPython object.
If you wish to define a new hook and activate it, you can make an :doc:`extension
</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
example, you could use a startup file like this::
import os
def calljed(self,filename, linenum):
"My editor hook calls the jed editor directly."
print "Calling my own editor, jed ..."
if os.system('jed +%d %s' % (linenum,filename)) != 0:
raise TryNext()
def load_ipython_extension(ip):
ip.set_hook('editor', calljed)
"""
#*****************************************************************************
# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import os
import subprocess
import sys
from IPython.core.error import TryNext
# List here all the default hooks. For now it's just the editor functions
# but over time we'll move here all the public API for user-accessible things.
__all__ = ['editor', 'fix_error_editor', 'synchronize_with_editor',
'shutdown_hook', 'late_startup_hook',
'show_in_pager','pre_prompt_hook',
'pre_run_code_hook', 'clipboard_get']
deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
'late_startup_hook': "a callback for the 'shell_initialized' event",
'shutdown_hook': "the atexit module",
}
def editor(self, filename, linenum=None, wait=True):
"""Open the default editor at the given filename and linenumber.
This is IPython's default editor hook, you can use it as an example to
write your own modified one. To set your own editor function as the
new editor hook, call ip.set_hook('editor',yourfunc)."""
# IPython configures a default editor at startup by reading $EDITOR from
# the environment, and falling back on vi (unix) or notepad (win32).
editor = self.editor
# marker for at which line to open the file (for existing objects)
if linenum is None or editor=='notepad':
linemark = ''
else:
linemark = '+%d' % int(linenum)
# Enclose in quotes if necessary and legal
if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
editor = '"%s"' % editor
# Call the actual editor
proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
shell=True)
if wait and proc.wait() != 0:
raise TryNext()
import tempfile
def fix_error_editor(self,filename,linenum,column,msg):
"""Open the editor at the given filename, linenumber, column and
show an error message. This is used for correcting syntax errors.
The current implementation only has special support for the VIM editor,
and falls back on the 'editor' hook if VIM is not used.
Call ip.set_hook('fix_error_editor',youfunc) to use your own function,
"""
def vim_quickfix_file():
t = tempfile.NamedTemporaryFile()
t.write('%s:%d:%d:%s\n' % (filename,linenum,column,msg))
t.flush()
return t
if os.path.basename(self.editor) != 'vim':
self.hooks.editor(filename,linenum)
return
t = vim_quickfix_file()
try:
if os.system('vim --cmd "set errorformat=%f:%l:%c:%m" -q ' + t.name):
raise TryNext()
finally:
t.close()
def synchronize_with_editor(self, filename, linenum, column):
pass
class CommandChainDispatcher:
""" Dispatch calls to a chain of commands until some func can handle it
Usage: instantiate, execute "add" to add commands (with optional
priority), execute normally via f() calling mechanism.
"""
def __init__(self,commands=None):
if commands is None:
self.chain = []
else:
self.chain = commands
def __call__(self,*args, **kw):
""" Command chain is called just like normal func.
This will call all funcs in chain with the same args as were given to
this function, and return the result of first func that didn't raise
TryNext"""
last_exc = TryNext()
for prio,cmd in self.chain:
#print "prio",prio,"cmd",cmd #dbg
try:
return cmd(*args, **kw)
except TryNext as exc:
last_exc = exc
# if no function will accept it, raise TryNext up to the caller
raise last_exc
def __str__(self):
return str(self.chain)
def add(self, func, priority=0):
""" Add a func to the cmd chain with given priority """
self.chain.append((priority, func))
self.chain.sort(key=lambda x: x[0])
def __iter__(self):
""" Return all objects in chain.
Handy if the objects are not callable.
"""
return iter(self.chain)
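# Illustrative sketch (not part of IPython's public API; the two handlers below are
# invented for demonstration): a handler defers to the next one in the chain by
# raising TryNext, and the dispatcher returns the first result that sticks.
def _example_command_chain():
    def defer(x):
        raise TryNext()
    def double(x):
        return x * 2
    dispatcher = CommandChainDispatcher()
    dispatcher.add(defer, priority=0)
    dispatcher.add(double, priority=1)
    return dispatcher(21)  # returns 42: 'defer' raised TryNext, 'double' handled the call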
def shutdown_hook(self):
""" default shutdown hook
    Typically, shutdown hooks should raise TryNext so all shutdown ops are done
"""
#print "default shutdown hook ok" # dbg
return
def late_startup_hook(self):
""" Executed after ipython has been constructed and configured
"""
#print "default startup hook ok" # dbg
def show_in_pager(self, data, start, screen_lines):
""" Run a string through pager """
# raising TryNext here will use the default paging functionality
raise TryNext
def pre_prompt_hook(self):
""" Run before displaying the next prompt
Use this e.g. to display output from asynchronous operations (in order
to not mess up text entry)
"""
return None
def pre_run_code_hook(self):
""" Executed before running the (prefiltered) code in IPython """
return None
def clipboard_get(self):
""" Get text from the clipboard.
"""
from IPython.lib.clipboard import (
osx_clipboard_get, tkinter_clipboard_get,
win32_clipboard_get
)
if sys.platform == 'win32':
chain = [win32_clipboard_get, tkinter_clipboard_get]
elif sys.platform == 'darwin':
chain = [osx_clipboard_get, tkinter_clipboard_get]
else:
chain = [tkinter_clipboard_get]
dispatcher = CommandChainDispatcher()
for func in chain:
dispatcher.add(func)
text = dispatcher()
return text
| 32.827907
| 94
| 0.646359
|
79503efd8eaa401a4d84acb3e54dbbb3735203a5
| 15,101
|
py
|
Python
|
webcamlib/Scheduler.py
|
mbuckaway/trailcam
|
ebea33b7f93b5b813af53e9d00076ba5d8fb6fbf
|
[
"MIT"
] | null | null | null |
webcamlib/Scheduler.py
|
mbuckaway/trailcam
|
ebea33b7f93b5b813af53e9d00076ba5d8fb6fbf
|
[
"MIT"
] | null | null | null |
webcamlib/Scheduler.py
|
mbuckaway/trailcam
|
ebea33b7f93b5b813af53e9d00076ba5d8fb6fbf
|
[
"MIT"
] | null | null | null |
from webcamlib.Config import Config, ConfigScheduler
from webcamlib.Exceptions import InvalidFunctionError
from webcamlib.Annotate import Annotate
from webcamlib.TemperatureSensor import TemperatureSensor
from webcamlib.VoltageSensor import VoltageSensor
from webcamlib.LightSensor import LightSensor
from webcamlib.ThingSpeakData import ThingSpeakData
from webcamlib.SendAlert import SendAlert
from webcamlib.Camera import Camera
from webcamlib.FtpFile import FtpFile
from webcamlib.TwitterPost import TwitterPost
from webcamlib.TrailRestClient import TrailRestClient
from gpiozero import LED
import logging
import sched, time
import sys
from datetime import datetime
import signal
import subprocess
class Process:
"""
Class to hold the process data:
ticks - number of tick before a process is run
count - current tick count
description - describes the task
functions - functions to run
"""
    def __init__(self, ticks, count, description, functions):
self.logger = logging.getLogger('process')
self.property_ticks = ticks
self.property_count = count
self.property_functions = functions
        self.property_description = description
self.logger.debug("New Process starting at " + str(ticks) + " with count " + str(count) + " and functions " + str(functions))
def decrement_count(self):
if (self.property_count != 0):
self.property_count-=1
self.logger.debug("{} process {} ticks remaining".format(self.property_description, self.property_count))
def reset_count(self):
self.property_count=self.property_ticks-1
if (self.property_ticks<0):
self.property_ticks=0
@property
def description (self):
return self.property_description
@property
def functions(self):
return self.property_functions
@property
def do_runtask(self):
return (self.property_count==0)
class SchedulerData:
"""
Data storage class. Holds data for the scheduler that is passed around to each of the processes.
"""
def __init__(self):
self.property_temperature = 0.0
self.property_pressure = 0.0
self.property_voltage = 0.0
self.property_current = 0.0
self.property_light = 400
self.property_trailopen = False
self.property_annotation_photo = "This is a test"
self.property_annotation_twitter = "This is a test"
self.property_error = []
@property
def temperature(self):
return self.property_temperature
@temperature.setter
def temperature(self, value):
self.property_temperature = value
@property
def pressure(self):
return (self.property_pressure)
@pressure.setter
def pressure(self, value):
self.property_pressure = value
@property
def voltage(self):
return self.property_voltage
@voltage.setter
def voltage(self, value):
self.property_voltage = value
@property
def current(self):
return self.property_current
@current.setter
def current(self, value):
self.property_current = value
@property
def light(self):
return self.property_light
@light.setter
def light(self, value):
self.property_light = value
@property
def annotation_photo(self):
return self.property_annotation_photo
@annotation_photo.setter
def annotation_photo(self, value):
self.property_annotation_photo = value
@property
def annotation_twitter(self):
return self.property_annotation_twitter
@annotation_twitter.setter
def annotation_twitter(self, value):
self.property_annotation_twitter = value
@property
def haserror(self):
return len(self.property_error)>0
def clearerror(self):
self.property_error.clear()
@property
def lasterror(self):
sep = ": "
errorstr = sep.join(self.property_error)
return errorstr
@lasterror.setter
def lasterror(self, value):
self.property_error.append(value)
class Scheduler:
"""
    Class to run processes on a schedule. The current scheduler limits events to one-hour cycles, in that the maximum interval length
    is one hour. Most actions happen every minute, every five minutes, or every hour for our purposes. This is a "cheesy scheduler":
    a quick hack that may be replaced at some point if anything more flexible than simple per-hour cycles is required.
"""
def __init__(self, config):
self.logger = logging.getLogger('schedule')
self.config = config
self.data = SchedulerData()
self.functions = {
'sensors': self.sensors,
'senddata': self.senddata,
'checkvalues': self.checkvalues,
'annotate': self.annotate,
'photo': self.photo,
'ftpupload': self.ftpupload,
'twitterupload': self.twitterupload,
'trailstatus': self.trailstatus
}
self.SIGNALS_TO_NAMES_DICT = dict((getattr(signal, n), n) \
for n in dir(signal) if n.startswith('SIG') and '_' not in n )
self.processes = []
self.ledred = None
if self.config.ledred.enabled:
self.ledred = LED(self.config.ledred.gpiopin)
self.ledgreen = None
if self.config.ledgreen.enabled:
self.ledgreen = LED(self.config.ledgreen.gpiopin)
# Calculate the number of ticks until the next run of
# each process based on the configuration
self.ticksperhour = int(3600/self.config.scheduler.interval)
self.intervalsperminute = int(60/self.config.scheduler.interval)
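        # Worked example (assuming a hypothetical interval of 60 seconds in the config):
        # ticksperhour = 3600/60 = 60 and intervalsperminute = 60/60 = 1, so a process
        # with count == 1 runs on every tick (once a minute) and a process with
        # count == 60 runs once per hour.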
# Check that the actions are valid
now = datetime.now()
minute = now.minute
self.logger.debug("Time now is {0}:{1}".format(now.hour, now.minute))
self.logger.debug("Intervals per minute: {0}".format(self.intervalsperminute))
self.logger.debug("Ticks per hour: {0}".format(self.ticksperhour))
self.lastevent = 0
for process in self.config.scheduler.processes:
if (process.enabled):
for function in process.functions:
if (function not in self.functions):
raise InvalidFunctionError(function)
# Special case. count is same as intervals per min. No calculation needed.
if (process.count == self.intervalsperminute):
current_count = 0
process_ticks_perhour = self.intervalsperminute*60
                elif (process.count == self.ticksperhour):
                    # if we run once per hour, the count is the number of ticks left in the hour
                    process_ticks_perhour = 1
                    current_count = self.ticksperhour - minute
else:
process_ticks_perhour = int(self.ticksperhour/process.count)
current_count = int(minute%process.count)
#if (current_ticks<0):
# current_ticks = 0
self.logger.debug("---")
self.logger.debug("process_ticks_perhour {}".format(process_ticks_perhour))
self.logger.debug("current_count " + str(current_count))
self.logger.debug("process.ticks " + str(process.count))
self.processes.append(Process(process.count, current_count, process.description, process.functions))
self.action()
self.scheduler = sched.scheduler(time.time, time.sleep)
now = datetime.now()
hour = now.hour
minute = now.minute + 1
if (minute>59):
hour +=1
minute =0
        if (hour > 23):
hour = 0
self.logger.info("First action starts at " + str(hour) + ":" + str(minute))
nextminute = datetime(now.year, now.month, now.day, hour, minute, 0)
self.lastevent = self.scheduler.enterabs(nextminute.timestamp(), 1, self.action)
def action(self):
""" Method to run ever scheduler tick """
if (self.lastevent):
self.lastevent = self.scheduler.enter(self.config.scheduler.interval, 1, self.action)
now = datetime.now()
self.logger.debug("Running action at {}:{}".format(now.hour, now.minute))
for process in self.processes:
if (process.do_runtask):
process.reset_count()
self.logger.info("Running: {}".format(process.description))
for function in process.functions:
self._run_function(function)
else:
process.decrement_count()
def _run_function(self, function):
if (function in self.functions):
self.logger.info("Running {0}".format(function))
try:
self.functions[function]()
except Exception as e:
self.logger.exception("Exception running function: %s", e)
else:
self.logger.error("Function {} does not exist!".format(function))
def _receive_signal(self, signum, stack):
if signum in [1,2,3,15]:
self.logger.warn('Caught signal %s (%s), exiting.' % (self.SIGNALS_TO_NAMES_DICT[signum], str(signum)))
self.stop()
sys.exit()
else:
self.logger.warn('Caught signal %s (%s), ignoring.' % (self.SIGNALS_TO_NAMES_DICT[signum], str(signum)))
def run(self):
""" Method to run the scheduler. Method never returns! """
signal.signal(signal.SIGINT, self._receive_signal)
signal.signal(signal.SIGTERM, self._receive_signal)
signal.signal(signal.SIGHUP, self._receive_signal)
self.scheduler.run(blocking=True)
def stop(self):
""" Checks if the queue is empty, and if not, cancels it """
self.logger.info("Stopping scheduler")
self.scheduler.cancel(self.lastevent)
if (not self.scheduler.empty()):
self.logger.info("Scheduler empty scheduler queue")
for event in self.scheduler.queue:
self.scheduler.cancel(event)
def sensors(self):
logging.debug("Getting sensor data...")
temperaturesensor = TemperatureSensor(self.config.sensors.temperature)
self.data.temperature = temperaturesensor.temperature
self.data.pressure = temperaturesensor.pressure
logging.info("Temperature data: {}C".format(self.data.temperature))
logging.info("Pressure data: {}hPa".format(self.data.pressure))
lightsensor = LightSensor(self.config.sensors.light)
self.data.light = lightsensor.lightlevel
logging.info("Light data: {}Lux".format(self.data.light))
voltagesensor = VoltageSensor(self.config.sensors.voltage)
self.data.voltage = voltagesensor.voltage
self.data.current = voltagesensor.current
logging.info("Voltage data: {}V {}mA".format(self.data.voltage, self.data.current))
def senddata(self):
thingspeakdata = ThingSpeakData(self.config, self.data)
thingspeakdata.WriteData()
def checkvalues(self):
"""
Check if the voltage is too low. if so, send a text msg and shutdown the system!
However, only do this IF the voltage is more then 0 and we are enabled
"""
if (self.config.sensors.voltage.enabled and
(self.data.voltage > 0) and
(self.data.voltage < self.config.hwmon.shutdown_voltage)):
self.logger.warn("Supply voltage below shutdown level! ({}V)".format(self.data.voltage))
if (self.config.hwmon.twilio_enabled):
sendalert = SendAlert(self.config.hwmon)
sendalert.SendShutdown(self.data.voltage)
if (self.config.hwmon.shutdown_enabled):
self.logger.warn("Forcing system to halt and then we exit!")
# This will fail if we are not root, but....
subprocess.call("/sbin/halt", shell=False)
self.stop()
sys.exit()
else:
self.logger.debug("Voltage values acceptable")
def annotate(self):
annotate = Annotate(self.config.annotate, self.data)
annotate.Annotate()
def photo(self):
# Check if the light sensor is enabled, and the light level is too low. if so, no photo.
if ((not self.config.sensors.light.enabled) or (self.config.sensors.light.enabled and self.data.light>30)):
camera = Camera(self.config, self.data)
camera.SnapPhoto()
camera.AnnotateImage()
else:
self.logger.warn("Skipping photo due to low light level")
def ftpupload(self):
if ((not self.config.sensors.light.enabled) or (self.config.sensors.light.enabled and self.data.light>30)):
ftpfile = FtpFile(self.config, self.data)
ftpfile.sendfile()
else:
self.logger.warn("Skipping photo due to low light level")
def twitterupload(self):
if ((not self.config.sensors.light.enabled) or (self.config.sensors.light.enabled and self.data.light>30)):
twitterpost = TwitterPost(self.config, self.data)
twitterpost.post()
else:
self.logger.warn("Skipping twitter due to low light level")
def trailstatus(self):
self.logger.debug("Getting trail status...")
restclient = TrailRestClient(self.config)
isopen = True
try:
isopen = restclient.status()
if isopen:
self.logger.warn("Trails are open")
else:
self.logger.warn("Trails are closed")
if self.config.ledred.enabled:
if isopen:
self.logger.warn("Red led is off")
self.ledred.off()
else:
self.logger.warn("Red led is on")
self.ledred.on()
else:
self.logger.warn("Red led is disabled")
if self.config.ledgreen.enabled:
if isopen:
self.logger.warn("Green led is on")
self.ledgreen.on()
else:
self.logger.warn("Green led is off")
self.ledgreen.off()
else:
self.logger.warn("Green led is disabled")
except Exception as e:
self.logger.error("Unable to update trail status: %s", str(e))
if __name__ == '__main__':
# Setup logging to the screen only for testing
root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] [%(name)-10.10s] [%(levelname)-7.7s] %(message)s')
ch.setFormatter(formatter)
root_logger.addHandler(ch)
try:
config = Config('../config.json')
scheduler = Scheduler(config)
scheduler.run()
except Exception as e:
root_logger.exception("Error: %s", e)
| 38.133838
| 133
| 0.62135
|
795040d51dd4a7e37cc75e7fa31de41504733dd3
| 435
|
py
|
Python
|
intro/numpy/solutions/2_3_crude_integration.py
|
junghun73/Learning
|
8b5a295c42f142a3b2f5fa13fc75434a2ea9235a
|
[
"CC-BY-4.0"
] | 419
|
2016-03-05T08:50:48.000Z
|
2022-03-24T15:16:46.000Z
|
intro/numpy/solutions/2_3_crude_integration.py
|
techeye220/scipy-lecture-notes-zh-CN
|
cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6
|
[
"CC-BY-4.0"
] | 5
|
2016-05-21T14:21:12.000Z
|
2017-10-06T11:09:48.000Z
|
intro/numpy/solutions/2_3_crude_integration.py
|
techeye220/scipy-lecture-notes-zh-CN
|
cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6
|
[
"CC-BY-4.0"
] | 233
|
2016-02-13T09:22:57.000Z
|
2021-11-11T17:58:44.000Z
|
import numpy as np
from numpy import newaxis
def f(a, b, c):
return a**b - c
a = np.linspace(0, 1, 24)
b = np.linspace(0, 1, 12)
c = np.linspace(0, 1, 6)
samples = f(a[:,newaxis,newaxis],
b[newaxis,:,newaxis],
c[newaxis,newaxis,:])
# or,
#
# a, b, c = np.ogrid[0:1:24j, 0:1:12j, 0:1:6j]
# samples = f(a, b, c)
integral = samples.mean()
print "Approximation:", integral
print "Exact:", np.log(2) - 0.5
| 18.125
| 46
| 0.574713
|
795042c3385f0f20f02e040e644e02d653693671
| 11,159
|
py
|
Python
|
rest-service/manager_rest/utils.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/utils.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/utils.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import glob
import json
import errno
import shutil
import zipfile
import tempfile
import platform
from datetime import datetime
from os import path, makedirs
from base64 import urlsafe_b64encode
import wagon
from flask import g
from flask import request
from werkzeug.local import LocalProxy
from flask_security import current_user
from dsl_parser.constants import HOST_AGENT_PLUGINS_TO_INSTALL
from cloudify import logs
from cloudify.constants import BROKER_PORT_SSL
from cloudify.models_states import VisibilityState
from cloudify.amqp_client import create_events_publisher, get_client
from manager_rest import constants, config, manager_exceptions
def check_allowed_endpoint(allowed_endpoints):
# Getting the resource from the endpoint, for example 'status' or 'sites'
# from 'v3.1/status' and 'v3.1/sites/<string:name>'. GET /version url
# is the only one that excludes the api version
if request.endpoint is None:
return False
endpoint_parts = request.endpoint.split('/')
request_endpoint = endpoint_parts[1] if len(endpoint_parts) > 1 else \
endpoint_parts[0]
return request_endpoint in allowed_endpoints
def is_sanity_mode():
return os.path.isfile(constants.SANITY_MODE_FILE_PATH)
def is_internal_request():
remote_addr = _get_remote_addr()
http_hosts = [_get_host(), constants.LOCAL_ADDRESS]
# cloudify-rest is the host for the auth request for the file-server
return remote_addr in http_hosts or 'cloudify-rest' in _get_host()
def _get_host():
return request.host
def _get_remote_addr():
return request.remote_addr
def copy_resources(file_server_root, resources_path=None):
if resources_path is None:
resources_path = path.abspath(__file__)
for i in range(3):
resources_path = path.dirname(resources_path)
resources_path = path.join(resources_path, 'resources')
cloudify_resources = path.join(resources_path,
'rest-service',
'cloudify')
shutil.copytree(cloudify_resources, path.join(file_server_root,
'cloudify'))
def mkdirs(folder_path):
try:
makedirs(folder_path)
except OSError as exc:
if exc.errno == errno.EEXIST and path.isdir(folder_path):
pass
else:
raise
def create_filter_params_list_description(parameters, list_type):
return [{'name': filter_val,
'description': 'List {type} matching the \'{filter}\' '
'filter value'.format(type=list_type,
filter=filter_val),
'required': False,
'allowMultiple': False,
'dataType': 'string',
'defaultValue': None,
'paramType': 'query'} for filter_val in parameters]
def read_json_file(file_path):
with open(file_path) as f:
return json.load(f)
def write_dict_to_json_file(file_path, dictionary):
with open(file_path, 'w') as f:
json.dump(dictionary, f)
def is_bypass_maintenance_mode(request):
bypass_maintenance_header = 'X-BYPASS-MAINTENANCE'
return request.headers.get(bypass_maintenance_header)
def get_plugin_archive_path(plugin_id, archive_name):
return os.path.join(
config.instance.file_server_root,
constants.FILE_SERVER_PLUGINS_FOLDER,
plugin_id,
archive_name
)
def plugin_installable_on_current_platform(plugin):
dist, _, release = platform.linux_distribution(
full_distribution_name=False)
dist, release = dist.lower(), release.lower()
# Mac OSX is a special case, in which plugin.distribution and
# plugin.release will be None instead of ''
if 'macosx' in plugin.supported_platform:
dist = dist or None
release = release or None
return (plugin.supported_platform in ('any', 'manylinux1_x86_64') or all([
plugin.supported_platform == wagon.get_platform(),
plugin.distribution == dist,
plugin.distribution_release == release
]))
def get_formatted_timestamp():
# Adding 'Z' to match ISO format
return '{0}Z'.format(datetime.utcnow().isoformat()[:-3])
class classproperty(object): # NOQA # class CapWords
"""A class that acts a a decorator for class-level properties
class A(object):
_prop1 = 1
_prop2 = 2
@classproperty
def foo(cls):
return cls._prop1 + cls._prop2
And use it like this:
print A.foo # 3
"""
def __init__(self, get_func):
self.get_func = get_func
def __get__(self, _, owner_cls):
return self.get_func(owner_cls)
def create_auth_header(username=None, password=None, token=None, tenant=None):
"""Create a valid authentication header either from username/password or
a token if any were provided; return an empty dict otherwise
"""
headers = {}
if username and password:
credentials = '{0}:{1}'.format(username, password)
headers = {constants.CLOUDIFY_AUTH_HEADER:
constants.BASIC_AUTH_PREFIX + urlsafe_b64encode(credentials)
}
elif token:
headers = {constants.CLOUDIFY_AUTH_TOKEN_HEADER: token}
if tenant:
headers[constants.CLOUDIFY_TENANT_HEADER] = tenant
return headers
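# Illustrative usage sketch (the credentials and tenant name are made up; the actual
# header names come from manager_rest.constants):
#
#   headers = create_auth_header(username='admin', password='secret',
#                                tenant='default_tenant')
#
# yields a dict with one basic-auth entry (base64 of 'admin:secret') plus the tenant
# header, while create_auth_header(token=..., tenant=...) yields a token header instead.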
def all_tenants_authorization():
return (
current_user.id == constants.BOOTSTRAP_ADMIN_ID or
any(r in current_user.system_roles
for r in config.instance.authorization_permissions['all_tenants'])
)
def tenant_specific_authorization(tenant, resource_name, action='list'):
"""
    Return true if the user is permitted to perform a certain action
    in a given tenant on a given resource (for filtering purpose).
"""
resource_name = constants.MODELS_TO_PERMISSIONS.get(resource_name,
resource_name.lower())
try:
permission_name = '{0}_{1}'.format(resource_name, action)
permission_roles = \
config.instance.authorization_permissions[permission_name]
except KeyError:
permission_roles = \
config.instance.authorization_permissions[resource_name.lower()]
return current_user.has_role_in(tenant, permission_roles)
def is_administrator(tenant):
administrators_roles = \
config.instance.authorization_permissions['administrators']
return (
current_user.id == constants.BOOTSTRAP_ADMIN_ID or
current_user.has_role_in(tenant, administrators_roles)
)
def is_create_global_permitted(tenant):
create_global_roles = \
config.instance.authorization_permissions['create_global_resource']
return (
current_user.id == constants.BOOTSTRAP_ADMIN_ID or
current_user.has_role_in(tenant, create_global_roles)
)
def can_execute_global_workflow(tenant):
execute_global_roles = \
config.instance.authorization_permissions['execute_global_workflow']
return (
current_user.id == constants.BOOTSTRAP_ADMIN_ID or
current_user.has_role_in(tenant, execute_global_roles)
)
def validate_global_modification(resource):
    # A global resource can't be modified from outside its tenant.
if resource.visibility == VisibilityState.GLOBAL and \
resource.tenant_name != current_tenant.name:
raise manager_exceptions.IllegalActionError(
"Can't modify the global resource `{0}` from outside its "
"tenant `{1}`".format(resource.id, resource.tenant_name))
@LocalProxy
def current_tenant():
tenant = getattr(g, 'current_tenant', None)
if not tenant:
raise manager_exceptions.TenantNotProvided(
'Authorization failed: tenant not provided')
return tenant
def set_current_tenant(tenant):
g.current_tenant = tenant
def unzip(archive, destination=None, logger=None):
if not destination:
destination = tempfile.mkdtemp()
if logger:
logger.debug('Extracting zip {0} to {1}...'.
format(archive, destination))
with zipfile.ZipFile(archive, 'r') as zip_file:
zip_file.extractall(destination)
return destination
def files_in_folder(folder, name_pattern='*'):
files = []
for item in glob.glob(os.path.join(folder, name_pattern)):
if os.path.isfile(item):
files.append(os.path.join(folder, item))
return files
def remove(path):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def send_event(event, message_type):
logs.populate_base_item(event, 'cloudify_event')
events_publisher = create_events_publisher(
amqp_host=config.instance.amqp_host,
amqp_user=config.instance.amqp_username,
amqp_pass=config.instance.amqp_password,
amqp_port=BROKER_PORT_SSL,
amqp_vhost='/',
ssl_enabled=True,
ssl_cert_path=config.instance.amqp_ca_path
)
events_publisher.publish_message(event, message_type)
events_publisher.close()
def is_visibility_wider(first, second):
states = VisibilityState.STATES
return states.index(first) > states.index(second)
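# Illustrative sketch (not part of the original module): the comparison relies on
# VisibilityState.STATES being ordered from narrowest to widest (assumed here to
# be private -> tenant -> global), so under that assumption:
#
#   >>> is_visibility_wider('global', 'tenant')
#   True
#   >>> is_visibility_wider('tenant', 'tenant')
#   False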
def validate_deployment_and_site_visibility(deployment, site):
if is_visibility_wider(deployment.visibility, site.visibility):
raise manager_exceptions.IllegalActionError(
"The visibility of deployment `{0}`: `{1}` can't be wider than "
"the visibility of it's site `{2}`: `{3}`"
.format(deployment.id, deployment.visibility, site.name,
site.visibility)
)
def extract_host_agent_plugins_from_plan(plan):
host_agent_plugins_to_install = plan.get(
HOST_AGENT_PLUGINS_TO_INSTALL, [])
if not host_agent_plugins_to_install:
for node in plan.get('nodes', []):
for plugin in node.get('plugins_to_install', []):
host_agent_plugins_to_install.append(plugin)
return host_agent_plugins_to_install
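# Illustrative sketch (not part of the original module): when the plan carries no
# top-level host-agent plugin list, the plugins are gathered from each node's
# 'plugins_to_install' entry, e.g. for a hypothetical minimal plan:
#
#   >>> plan = {'nodes': [{'plugins_to_install': [{'name': 'agent-plugin'}]},
#   ...                   {'plugins_to_install': []}]}
#   >>> extract_host_agent_plugins_from_plan(plan)
#   [{'name': 'agent-plugin'}]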
def get_amqp_client():
return get_client(
amqp_host=config.instance.amqp_host,
amqp_user=config.instance.amqp_username,
amqp_pass=config.instance.amqp_password,
amqp_port=BROKER_PORT_SSL,
amqp_vhost='/',
ssl_enabled=True,
ssl_cert_path=config.instance.amqp_ca_path,
connect_timeout=3,
)
| 31.792023
| 79
| 0.683663
|
795042ce3f3a51eaca4061c6aad6ca978e6b8b10
| 1,113
|
py
|
Python
|
panel/_testing/fixtures.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | 1
|
2019-10-15T13:21:20.000Z
|
2019-10-15T13:21:20.000Z
|
panel/_testing/fixtures.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | null | null | null |
panel/_testing/fixtures.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | 1
|
2019-06-04T04:17:53.000Z
|
2019-06-04T04:17:53.000Z
|
"""
A module containing testing utilities and fixtures.
"""
from __future__ import absolute_import, division, unicode_literals
import re
import shutil
import pytest
from bokeh.document import Document
from pyviz_comms import Comm
@pytest.fixture
def document():
return Document()
@pytest.fixture
def comm():
return Comm()
@pytest.yield_fixture
def hv_bokeh():
import holoviews as hv
hv.renderer('bokeh')
prev_backend = hv.Store.current_backend
hv.Store.current_backend = 'bokeh'
yield
hv.Store.current_backend = prev_backend
@pytest.yield_fixture
def hv_mpl():
import holoviews as hv
hv.renderer('matplotlib')
prev_backend = hv.Store.current_backend
hv.Store.current_backend = 'matplotlib'
yield
hv.Store.current_backend = prev_backend
@pytest.yield_fixture
def tmpdir(request, tmpdir_factory):
name = request.node.name
name = re.sub("[\W]", "_", name)
MAXVAL = 30
if len(name) > MAXVAL:
name = name[:MAXVAL]
tmp_dir = tmpdir_factory.mktemp(name, numbered=True)
yield tmp_dir
shutil.rmtree(str(tmp_dir))
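# Illustrative sketch (not part of the original module): a hypothetical test in
# this suite could request the fixtures above by name, e.g.:
#
#   def test_markdown_pane_renders(document, comm):
#       import panel as pn
#       pane = pn.pane.Markdown('**bold**')
#       model = pane.get_root(document, comm=comm)
#       assert model.ref['id'] in pane._models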
| 20.236364
| 66
| 0.71159
|
795043147a5a5969d9454ed0cf389b455c8534be
| 6,635
|
py
|
Python
|
test.py
|
zhhugh/Violation-detection
|
0a3c0306a9fcac0518e5adc524d457af9e58e8d3
|
[
"MIT"
] | 1
|
2021-04-23T09:26:20.000Z
|
2021-04-23T09:26:20.000Z
|
test.py
|
zhhugh/Violation-detection
|
0a3c0306a9fcac0518e5adc524d457af9e58e8d3
|
[
"MIT"
] | null | null | null |
test.py
|
zhhugh/Violation-detection
|
0a3c0306a9fcac0518e5adc524d457af9e58e8d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project :building detection
@File :test.py
@Author :zhouhan
@Date    :2021/4/22 5:56 PM
'''
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings("ignore")
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
import cv2
import time
from mrcnn.config import Config
from datetime import datetime
from mrcnn.utils import compute_overlaps_masks
import colorsys
# Project root directory
ROOT_DIR = os.getcwd()
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "data")
# Trained model weights to load
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_crowdai-mapping-challenge_0029.h5")
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
class ShapesConfig(Config):
NAME = "shapes"
    # Train on 1 GPU with 1 image per GPU, so the effective batch size is
    # 1 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # background + 1 shapes
    # Use small images for faster training. Set the limits of the small side
    # and the large side; together they determine the image shape.
IMAGE_MIN_DIM = 320 # 320
IMAGE_MAX_DIM = 384 # 384
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 10
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 10
# use small validation steps since the epoch is small
VALIDATION_STEPS = 50
# import train_tongue
# class InferenceConfig(coco.CocoConfig):
class InferenceConfig(ShapesConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 1 # 1 Background + 1 Building
IMAGE_MAX_DIM = 320
IMAGE_MIN_DIM = 320
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'building']
# Compute how much of mask2 is covered by mask1 (intersection / area of mask2)
def compute_mask_coverage(mask1, mask2):
mask1 = np.reshape(mask1 > .5, (-1, 1)).astype(np.float32)
mask2 = np.reshape(mask2 > .5, (-1, 1)).astype(np.float32)
intersection = np.dot(mask1.T, mask2)
area = np.sum(mask2, axis=0)
# area2 = np.sum(mask2, axis=0)
# union = (area1[:, None] + area2[None:, ]) - intersection
# iou = intersection / union
coverage = intersection / area
return coverage
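# Illustrative worked example (not part of the original script): for two 2x2
# binary masks where mask2 has three positive pixels, two of which overlap
# mask1, the coverage is 2 / 3:
#
#   >>> m1 = np.array([[1, 1], [0, 0]])   # 2 positive pixels
#   >>> m2 = np.array([[1, 1], [1, 0]])   # 3 positive pixels
#   >>> compute_mask_coverage(m1, m2)     # ~0.667, returned as a 1x1 array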
def union_mask(masks):
total_mask = np.sum(masks, axis=2)
return total_mask
def detection(path, image_type=1):
    # Extract the file name (without extension) from the path
image_name = os.path.split(path)[1]
image_name = os.path.splitext(image_name)[0]
image = skimage.io.imread(path)
a = datetime.now()
# Run detection
results = model.detect([image], verbose=1)
print('results:')
# print(results)
b = datetime.now()
# Visualize results
print("time_cost", (b - a).seconds)
r = results[0]
image_save_path = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], figsize=(8, 8), image_name=image_name,
image_type=image_type)
return image_save_path
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
return colors
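# Illustrative note (not part of the original script): random_colors(3) returns
# three RGB tuples whose hues are evenly spaced on the HSV circle (0, 1/3, 2/3),
# i.e. roughly red, green and blue when bright=True.

# Coverage threshold: masks in the new image that are covered by the union of
# the old image's masks by less than this fraction are flagged as suspected
# illegal constructions.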
violation_confidence = 0.4
def violation_building_detection(base_image_path, new_image_path):
"""
    @param base_image_path: path of the image before the change
    @param new_image_path: path of the image after the change
    @return: save path of the detection result for the "before" image,
             save path of the detection result for the "after" image,
             and the number of suspected illegal buildings
"""
violation_building_nums = 0
colors = random_colors(2)
base_image = skimage.io.imread(base_image_path)
new_image = skimage.io.imread(new_image_path)
base_image_name = os.path.split(base_image_path)[1]
base_image_name = os.path.splitext(base_image_name)[0]
new_image_name = os.path.split(new_image_path)[1]
new_image_name = os.path.splitext(new_image_name)[0]
base_results = model.detect([base_image], verbose=1)
new_results = model.detect([new_image], verbose=1)
base_r = base_results[0]
new_r = new_results[0]
base_n = base_r['class_ids'].size
violation_indexes = [0 for i in range(base_n)]
base_image_save_path = visualize.display_instances(base_image, base_r['rois'], base_r['masks'], base_r['class_ids'],
class_names, base_r['scores'], figsize=(8, 8),
image_name=base_image_name,
image_type=1, violation_indexes=violation_indexes, colors=colors)
new_n = new_r['class_ids'].size
violation_indexes = [0 for i in range(new_n)]
if base_n != new_n:
total_mask = union_mask(base_r['masks'])
for i in range(new_n):
coverage = compute_mask_coverage(total_mask, new_r['masks'][:, :, i])
print(coverage)
            if coverage < violation_confidence:
                print("Suspected illegal building detected")
violation_indexes[i] = 1
violation_building_nums += 1
else:
print("没有发现违章建筑")
new_image_save_path = visualize.display_instances(new_image, new_r['rois'], new_r['masks'], new_r['class_ids'],
class_names, new_r['scores'], figsize=(8, 8),
image_name=new_image_name,
image_type=2, violation_indexes=violation_indexes, colors=colors)
return base_image_save_path, new_image_save_path, violation_building_nums
| 32.208738
| 120
| 0.652449
|
795043227ee9d2ef627c08f58b3fd158da97d30c
| 59,759
|
py
|
Python
|
core/domain/topic_services_test.py
|
group1oppia/oppia
|
84d9f4f7cf9f8737c4bb7677468eec41607bc187
|
[
"Apache-2.0"
] | null | null | null |
core/domain/topic_services_test.py
|
group1oppia/oppia
|
84d9f4f7cf9f8737c4bb7677468eec41607bc187
|
[
"Apache-2.0"
] | null | null | null |
core/domain/topic_services_test.py
|
group1oppia/oppia
|
84d9f4f7cf9f8737c4bb7677468eec41607bc187
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
class TopicServicesUnitTests(test_utils.GenericTestBase):
"""Tests for topic services."""
user_id = 'user_id'
story_id_1 = 'story_1'
story_id_2 = 'story_2'
story_id_3 = 'story_3'
subtopic_id = 1
skill_id_1 = 'skill_1'
skill_id_2 = 'skill_2'
def setUp(self):
super(TopicServicesUnitTests, self).setUp()
self.TOPIC_ID = topic_services.get_new_topic_id()
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title',
'subtopic_id': 1
})]
self.save_new_topic(
self.TOPIC_ID, self.user_id, name='Name',
abbreviated_name='abbrev', thumbnail_filename='topic.png',
description='Description',
canonical_story_ids=[self.story_id_1, self.story_id_2],
additional_story_ids=[self.story_id_3],
uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2],
subtopics=[], next_subtopic_id=1)
self.save_new_story(
self.story_id_1, self.user_id, 'Title', 'Description', 'Notes',
self.TOPIC_ID)
self.save_new_story(
self.story_id_3, self.user_id, 'Title 3', 'Description 3', 'Notes',
self.TOPIC_ID)
self.save_new_story(
self.story_id_2, self.user_id, 'Title 2', 'Description 2', 'Notes',
self.TOPIC_ID)
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist, 'Added a subtopic')
self.topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.set_admins([self.ADMIN_USERNAME])
self.set_topic_managers([user_services.get_username(self.user_id_a)])
self.user_a = user_services.UserActionsInfo(self.user_id_a)
self.user_b = user_services.UserActionsInfo(self.user_id_b)
self.user_admin = user_services.UserActionsInfo(self.user_id_admin)
def test_compute_summary(self):
topic_summary = topic_services.compute_summary_of_topic(self.topic)
self.assertEqual(topic_summary.id, self.TOPIC_ID)
self.assertEqual(topic_summary.name, 'Name')
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
self.assertEqual(topic_summary.uncategorized_skill_count, 2)
self.assertEqual(topic_summary.subtopic_count, 1)
self.assertEqual(topic_summary.total_skill_count, 2)
def test_get_all_summaries(self):
topic_summaries = topic_services.get_all_topic_summaries()
self.assertEqual(len(topic_summaries), 1)
self.assertEqual(topic_summaries[0].name, 'Name')
self.assertEqual(topic_summaries[0].canonical_story_count, 0)
self.assertEqual(topic_summaries[0].additional_story_count, 0)
self.assertEqual(topic_summaries[0].total_skill_count, 2)
self.assertEqual(topic_summaries[0].uncategorized_skill_count, 2)
self.assertEqual(topic_summaries[0].subtopic_count, 1)
def test_get_multi_summaries(self):
topic_summaries = topic_services.get_multi_topic_summaries([
self.TOPIC_ID, 'invalid_id'])
self.assertEqual(len(topic_summaries), 2)
self.assertEqual(topic_summaries[0].name, 'Name')
self.assertEqual(topic_summaries[0].canonical_story_count, 0)
self.assertEqual(topic_summaries[0].additional_story_count, 0)
self.assertEqual(topic_summaries[0].total_skill_count, 2)
self.assertEqual(topic_summaries[0].uncategorized_skill_count, 2)
self.assertEqual(topic_summaries[0].subtopic_count, 1)
self.assertIsNone(topic_summaries[1])
def test_get_multi_rights(self):
topic_rights = topic_services.get_multi_topic_rights([
self.TOPIC_ID, 'invalid_id'])
self.assertEqual(len(topic_rights), 2)
self.assertEqual(topic_rights[0].id, self.TOPIC_ID)
self.assertEqual(topic_rights[0].manager_ids, [])
self.assertFalse(topic_rights[0].topic_is_published)
self.assertIsNone(topic_rights[1])
def test_get_new_topic_id(self):
new_topic_id = topic_services.get_new_topic_id()
self.assertEqual(len(new_topic_id), 12)
self.assertEqual(topic_models.TopicModel.get_by_id(new_topic_id), None)
def test_get_topic_from_model(self):
topic_model = topic_models.TopicModel.get(self.TOPIC_ID)
topic = topic_fetchers.get_topic_from_model(topic_model)
self.assertEqual(topic.to_dict(), self.topic.to_dict())
def test_cannot_get_topic_from_model_with_invalid_schema_version(self):
topic_services.create_new_topic_rights('topic_id', self.user_id_a)
commit_cmd = topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': 'name'
})
subtopic_dict = {
'id': 1,
'title': 'subtopic_title',
'skill_ids': []
}
model = topic_models.TopicModel(
id='topic_id',
name='name',
abbreviated_name='abbrev',
canonical_name='canonical_name',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_dict],
subtopic_schema_version=0,
story_reference_schema_version=0
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
self.user_id_a, 'topic model created', commit_cmd_dicts)
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1-v%d subtopic schemas at '
'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
topic_fetchers.get_topic_from_model(model)
topic_services.create_new_topic_rights('topic_id_2', self.user_id_a)
model = topic_models.TopicModel(
id='topic_id_2',
name='name 2',
abbreviated_name='abbrev',
canonical_name='canonical_name_2',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_dict],
subtopic_schema_version=1,
story_reference_schema_version=0
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
self.user_id_a, 'topic model created', commit_cmd_dicts)
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1-v%d story reference schemas at '
'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
topic_fetchers.get_topic_from_model(model)
def test_get_topic_summary_from_model(self):
topic_summary_model = topic_models.TopicSummaryModel.get(self.TOPIC_ID)
topic_summary = topic_services.get_topic_summary_from_model(
topic_summary_model)
self.assertEqual(topic_summary.id, self.TOPIC_ID)
self.assertEqual(topic_summary.name, 'Name')
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
self.assertEqual(topic_summary.uncategorized_skill_count, 2)
self.assertEqual(topic_summary.total_skill_count, 2)
self.assertEqual(topic_summary.subtopic_count, 1)
def test_get_topic_summary_by_id(self):
topic_summary = topic_services.get_topic_summary_by_id(self.TOPIC_ID)
self.assertEqual(topic_summary.id, self.TOPIC_ID)
self.assertEqual(topic_summary.name, 'Name')
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
self.assertEqual(topic_summary.uncategorized_skill_count, 2)
self.assertEqual(topic_summary.subtopic_count, 1)
def test_get_all_skill_ids_assigned_to_some_topic(self):
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': 1,
'skill_id': self.skill_id_1
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, change_list,
'Moved skill to subtopic.')
topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
topic_id, self.user_id, name='Name 2',
abbreviated_name='abbrev', thumbnail_filename=None,
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.skill_id_1, 'skill_3'],
subtopics=[], next_subtopic_id=1)
self.assertEqual(
topic_services.get_all_skill_ids_assigned_to_some_topic(),
set([self.skill_id_1, self.skill_id_2, 'skill_3']))
def test_cannot_create_topic_change_class_with_invalid_changelist(self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
topic_domain.TopicChange({
'invalid_cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_DESCRIPTION,
'old_value': 'Description',
'new_value': 'New Description'
})
def test_cannot_update_topic_property_with_invalid_changelist(self):
with self.assertRaisesRegexp(
Exception, (
'Value for property_name in cmd update_topic_property: '
'invalid property is not allowed')):
topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': 'invalid property',
'old_value': 'Description',
'new_value': 'New Description'
})
def test_cannot_update_subtopic_property_with_invalid_changelist(self):
with self.assertRaisesRegexp(
Exception, (
'The following required attributes are '
'missing: subtopic_id')):
topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY,
'property_name': 'invalid property',
'old_value': 'Description',
'new_value': 'New Description'
})
def test_update_subtopic_property(self):
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.subtopics), 1)
self.assertEqual(topic.subtopics[0].title, 'Title')
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY,
'property_name': 'title',
'subtopic_id': 1,
'old_value': 'Title',
'new_value': 'New Title'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Update title of subtopic.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.subtopics), 1)
self.assertEqual(topic.subtopics[0].title, 'New Title')
def test_cannot_create_topic_change_class_with_invalid_cmd(self):
with self.assertRaisesRegexp(
Exception, 'Command invalid cmd is not allowed'):
topic_domain.TopicChange({
'cmd': 'invalid cmd',
'property_name': 'title',
'subtopic_id': 1,
'old_value': 'Description',
'new_value': 'New Description'
})
def test_publish_and_unpublish_story(self):
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
topic.canonical_story_references[0].story_is_published, False)
self.assertEqual(
topic.additional_story_references[0].story_is_published, False)
topic_services.publish_story(
self.TOPIC_ID, self.story_id_1, self.user_id_admin)
topic_services.publish_story(
self.TOPIC_ID, self.story_id_3, self.user_id_admin)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
topic_summary = topic_services.get_topic_summary_by_id(self.TOPIC_ID)
self.assertEqual(
topic.canonical_story_references[0].story_is_published, True)
self.assertEqual(
topic.additional_story_references[0].story_is_published, True)
self.assertEqual(topic_summary.canonical_story_count, 1)
self.assertEqual(topic_summary.additional_story_count, 1)
topic_services.unpublish_story(
self.TOPIC_ID, self.story_id_1, self.user_id_admin)
topic_services.unpublish_story(
self.TOPIC_ID, self.story_id_3, self.user_id_admin)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
topic_summary = topic_services.get_topic_summary_by_id(self.TOPIC_ID)
self.assertEqual(
topic.canonical_story_references[0].story_is_published, False)
self.assertEqual(
topic.additional_story_references[0].story_is_published, False)
self.assertEqual(topic_summary.canonical_story_count, 0)
self.assertEqual(topic_summary.additional_story_count, 0)
def test_invalid_publish_and_unpublish_story(self):
with self.assertRaisesRegexp(
Exception, 'A topic with the given ID doesn\'t exist'):
topic_services.publish_story(
'invalid_topic', 'story_id_new', self.user_id_admin)
with self.assertRaisesRegexp(
Exception, 'A topic with the given ID doesn\'t exist'):
topic_services.unpublish_story(
'invalid_topic', 'story_id_new', self.user_id_admin)
with self.assertRaisesRegexp(
Exception, 'The user does not have enough rights to publish the '
'story.'):
topic_services.publish_story(
self.TOPIC_ID, self.story_id_3, self.user_id_b)
with self.assertRaisesRegexp(
Exception, 'The user does not have enough rights to unpublish the '
'story.'):
topic_services.unpublish_story(
self.TOPIC_ID, self.story_id_3, self.user_id_b)
with self.assertRaisesRegexp(
Exception, 'A story with the given ID doesn\'t exist'):
topic_services.publish_story(
self.TOPIC_ID, 'invalid_story', self.user_id_admin)
with self.assertRaisesRegexp(
Exception, 'A story with the given ID doesn\'t exist'):
topic_services.unpublish_story(
self.TOPIC_ID, 'invalid_story', self.user_id_admin)
self.save_new_story(
'story_10', self.user_id, 'Title 2', 'Description 2', 'Notes',
self.TOPIC_ID)
with self.assertRaisesRegexp(
Exception, 'Story with given id doesn\'t exist in the topic'):
topic_services.publish_story(
self.TOPIC_ID, 'story_10', self.user_id_admin)
with self.assertRaisesRegexp(
Exception, 'Story with given id doesn\'t exist in the topic'):
topic_services.unpublish_story(
self.TOPIC_ID, 'story_10', self.user_id_admin)
# Throw error if a story node doesn't have an exploration.
self.save_new_story(
'story_id_new', self.user_id, 'Title 2', 'Description 2', 'Notes',
self.TOPIC_ID)
topic_services.add_canonical_story(
self.user_id_admin, self.TOPIC_ID, 'story_id_new')
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 1'
})
]
story_services.update_story(
self.user_id_admin, 'story_id_new', changelist,
'Added node.')
with self.assertRaisesRegexp(
Exception, 'Story node with id node_1 does not contain an '
'exploration id.'):
topic_services.publish_story(
self.TOPIC_ID, 'story_id_new', self.user_id_admin)
# Throw error if exploration isn't published.
self.save_new_default_exploration(
'exp_id', self.user_id_admin, title='title')
self.publish_exploration(self.user_id_admin, 'exp_id')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_1',
'old_value': None,
'new_value': 'exp_id'
})]
story_services.update_story(
self.user_id_admin, 'story_id_new', change_list,
'Updated story node.')
rights_manager.unpublish_exploration(self.user_admin, 'exp_id')
with self.assertRaisesRegexp(
Exception, 'Exploration with ID exp_id is not public. Please '
'publish explorations before adding them to a story.'):
topic_services.publish_story(
self.TOPIC_ID, 'story_id_new', self.user_id_admin)
# Throws error if exploration doesn't exist.
exp_services.delete_exploration(self.user_id_admin, 'exp_id')
with self.assertRaisesRegexp(
Exception, 'Expected story to only reference valid explorations, '
'but found a reference to an invalid exploration with ID: exp_id'):
topic_services.publish_story(
self.TOPIC_ID, 'story_id_new', self.user_id_admin)
def test_update_topic(self):
topic_services.assign_role(
self.user_admin, self.user_a, topic_domain.ROLE_MANAGER,
self.TOPIC_ID)
# Test whether an admin can edit a topic.
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_DESCRIPTION,
'old_value': 'Description',
'new_value': 'New Description'
}), topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_ABBREVIATED_NAME,
'old_value': '',
'new_value': 'short name'
}), topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_THUMBNAIL_FILENAME,
'old_value': '',
'new_value': 'thumbnail.png'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated Description.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
topic_summary = topic_services.get_topic_summary_by_id(self.TOPIC_ID)
self.assertEqual(topic.description, 'New Description')
self.assertEqual(topic.abbreviated_name, 'short name')
self.assertEqual(topic.thumbnail_filename, 'thumbnail.png')
self.assertEqual(topic.version, 3)
self.assertEqual(topic_summary.version, 3)
# Test whether a topic_manager can edit a topic.
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_NAME,
'old_value': 'Name',
'new_value': 'New Name'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_a, self.TOPIC_ID, changelist, 'Updated Name.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
topic_summary = topic_services.get_topic_summary_by_id(self.TOPIC_ID)
self.assertEqual(topic.name, 'New Name')
self.assertEqual(topic.canonical_name, 'new name')
self.assertEqual(topic.version, 4)
self.assertEqual(topic_summary.name, 'New Name')
self.assertEqual(topic_summary.version, 4)
def test_update_topic_and_subtopic_page(self):
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title3',
'subtopic_id': 3
})]
with self.assertRaisesRegexp(
Exception, 'The given new subtopic id 3 is not equal to '
'the expected next subtopic id: 2'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Added subtopic.')
# Test whether the subtopic page was created for the above failed
# attempt.
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 3, strict=False)
self.assertIsNone(subtopic_page)
# Test exception raised for simultaneous adding and removing of
# subtopics.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title2',
'subtopic_id': 2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_SUBTOPIC,
'subtopic_id': 2
})
]
with self.assertRaisesRegexp(
Exception, 'The incoming changelist had simultaneous'
' creation and deletion of subtopics.'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Added and deleted a subtopic.')
# Test whether a subtopic page already existing in datastore can be
# edited.
changelist = [subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'property_name': (
subtopic_page_domain.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
'old_value': '',
'subtopic_id': 1,
'new_value': {
'html': '<p>New Value</p>',
'content_id': 'content'
}
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated html data')
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 1)
self.assertEqual(
subtopic_page.page_contents.subtitled_html.html,
'<p>New Value</p>')
# Test a sequence of changes with both topic and subtopic page changes.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title2',
'subtopic_id': 2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_SUBTOPIC,
'subtopic_id': 1
}),
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'property_name': (
subtopic_page_domain
.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
'old_value': {
'html': '',
'content_id': 'content'
},
'subtopic_id': 2,
'new_value': {
'html': '<p>New Value</p>',
'content_id': 'content'
}
}),
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'property_name': (
subtopic_page_domain
.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO),
'old_value': {
'voiceovers_mapping': {
'content': {}
}
},
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'filename': 'test.mp3',
'file_size_bytes': 100,
'needs_update': False,
'duration_secs': 0.3
}
}
}
},
'subtopic_id': 2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': 2,
'skill_id': self.skill_id_1
})
]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Added and removed a subtopic.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.subtopics), 1)
self.assertEqual(topic.next_subtopic_id, 3)
self.assertEqual(topic.subtopics[0].title, 'Title2')
self.assertEqual(topic.subtopics[0].skill_ids, [self.skill_id_1])
# Test whether the subtopic page corresponding to the deleted subtopic
# was also deleted.
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 1, strict=False)
self.assertIsNone(subtopic_page)
# Validate the newly created subtopic page.
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 2, strict=False)
self.assertEqual(
subtopic_page.page_contents.subtitled_html.html,
'<p>New Value</p>')
self.assertEqual(
subtopic_page.page_contents.recorded_voiceovers.to_dict(), {
'voiceovers_mapping': {
'content': {
'en': {
'filename': 'test.mp3',
'file_size_bytes': 100,
'needs_update': False,
'duration_secs': 0.3
}
}
}
})
# Making sure everything resets when an error is encountered anywhere.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title3',
'subtopic_id': 3
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title4',
'subtopic_id': 4
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_SUBTOPIC,
'subtopic_id': 2
}),
# The following is an invalid command as subtopic with id 2 was
            # deleted in the previous step.
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'property_name': (
subtopic_page_domain
.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
'old_value': '',
'subtopic_id': 2,
'new_value': {
'html': '<p>New Value</p>',
'content_id': 'content'
}
}),
]
with self.assertRaisesRegexp(
Exception, 'The subtopic with id 2 doesn\'t exist'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Done some changes.')
# Make sure the topic object in datastore is not affected.
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.subtopics), 1)
self.assertEqual(topic.next_subtopic_id, 3)
self.assertEqual(topic.subtopics[0].title, 'Title2')
self.assertEqual(topic.subtopics[0].skill_ids, [self.skill_id_1])
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 3, strict=False)
self.assertIsNone(subtopic_page)
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 4, strict=False)
self.assertIsNone(subtopic_page)
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 2, strict=False)
self.assertIsNotNone(subtopic_page)
def test_add_uncategorized_skill(self):
topic_services.add_uncategorized_skill(
self.user_id_admin, self.TOPIC_ID, 'skill_id_3')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
topic.uncategorized_skill_ids,
[self.skill_id_1, self.skill_id_2, 'skill_id_3'])
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Added skill_id_3 to uncategorized skill ids')
def test_delete_uncategorized_skill(self):
topic_services.delete_uncategorized_skill(
self.user_id_admin, self.TOPIC_ID, self.skill_id_1)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(topic.uncategorized_skill_ids, [self.skill_id_2])
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Removed %s from uncategorized skill ids' % self.skill_id_1)
def test_delete_canonical_story(self):
topic_services.delete_canonical_story(
self.user_id_admin, self.TOPIC_ID, self.story_id_1)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.canonical_story_references), 1)
self.assertEqual(
topic.canonical_story_references[0].story_id, self.story_id_2)
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Removed %s from canonical story ids' % self.story_id_1)
def test_add_canonical_story(self):
topic_services.add_canonical_story(
self.user_id_admin, self.TOPIC_ID, 'story_id')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
len(topic.canonical_story_references), 3)
self.assertEqual(
topic.canonical_story_references[2].story_id, 'story_id')
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Added %s to canonical story ids' % 'story_id')
def test_delete_additional_story(self):
topic_services.delete_additional_story(
self.user_id_admin, self.TOPIC_ID, self.story_id_3)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(len(topic.additional_story_references), 0)
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Removed %s from additional story ids' % self.story_id_3)
def test_add_additional_story(self):
topic_services.add_additional_story(
self.user_id_admin, self.TOPIC_ID, 'story_id_4')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
len(topic.additional_story_references), 2)
self.assertEqual(
topic.additional_story_references[1].story_id, 'story_id_4')
topic_commit_log_entry = (
topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3)
)
self.assertEqual(topic_commit_log_entry.commit_type, 'edit')
self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID)
self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin)
self.assertEqual(
topic_commit_log_entry.commit_message,
'Added story_id_4 to additional story ids')
def test_delete_topic(self):
# Test whether an admin can delete a topic.
topic_services.delete_topic(self.user_id_admin, self.TOPIC_ID)
self.assertIsNone(
topic_fetchers.get_topic_by_id(self.TOPIC_ID, strict=False))
self.assertIsNone(
topic_services.get_topic_summary_by_id(self.TOPIC_ID, strict=False))
self.assertIsNone(
subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 1, strict=False))
def test_delete_subtopic_with_skill_ids(self):
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_SUBTOPIC,
'subtopic_id': self.subtopic_id
})]
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 1, strict=False)
self.assertEqual(subtopic_page.id, self.TOPIC_ID + '-1')
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Removed 1 subtopic.')
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
self.TOPIC_ID, 1, strict=False)
self.assertIsNone(subtopic_page)
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
topic.uncategorized_skill_ids, [self.skill_id_1, self.skill_id_2])
self.assertEqual(topic.subtopics, [])
def test_update_subtopic_skill_ids(self):
# Adds a subtopic and moves skill id from one to another.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': self.skill_id_1
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': self.skill_id_2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'title': 'Title2',
'subtopic_id': 2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': self.subtopic_id,
'new_subtopic_id': 2,
'skill_id': self.skill_id_2
})
]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
subtopic_page = subtopic_page_services.get_subtopic_page_by_id(
topic.id, 2)
self.assertEqual(topic.uncategorized_skill_ids, [])
self.assertEqual(topic.subtopics[0].skill_ids, [self.skill_id_1])
self.assertEqual(topic.subtopics[1].skill_ids, [self.skill_id_2])
self.assertEqual(topic.subtopics[1].id, 2)
self.assertEqual(topic.next_subtopic_id, 3)
self.assertEqual(subtopic_page.topic_id, topic.id)
self.assertEqual(subtopic_page.id, self.TOPIC_ID + '-2')
# Tests invalid case where skill id is not present in the old subtopic.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': self.subtopic_id,
'new_subtopic_id': 2,
'skill_id': self.skill_id_2
})
]
with self.assertRaisesRegexp(
Exception,
'Skill id %s is not present in the given old subtopic'
% self.skill_id_2):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
# Tests invalid case where skill id is not an uncategorized skill id.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': 2,
'skill_id': 'skill_10'
})
]
with self.assertRaisesRegexp(
Exception,
'Skill id skill_10 is not an uncategorized skill id'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
# Tests invalid case where target subtopic doesn't exist.
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': self.subtopic_id,
'new_subtopic_id': None,
'skill_id': self.skill_id_1
})]
with self.assertRaisesRegexp(
Exception, 'The subtopic with id None does not exist.'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
        # Tests the valid skill id removal case.
changelist = [
topic_domain.TopicChange({
'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
'subtopic_id': 2,
'skill_id': self.skill_id_2
}),
topic_domain.TopicChange({
'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
'subtopic_id': self.subtopic_id,
'skill_id': self.skill_id_1
})
]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
topic.uncategorized_skill_ids, [self.skill_id_2, self.skill_id_1])
self.assertEqual(topic.subtopics[1].skill_ids, [])
self.assertEqual(topic.subtopics[0].skill_ids, [])
# Tests invalid case where skill id is not present in the subtopic
# from which it is to be removed.
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
'subtopic_id': self.subtopic_id,
'skill_id': 'skill_10'
})]
with self.assertRaisesRegexp(
Exception,
'Skill id skill_10 is not present in the old subtopic'):
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
def test_admin_can_manage_topic(self):
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_services.check_can_edit_topic(
self.user_admin, topic_rights))
def test_filter_published_topic_ids(self):
published_topic_ids = topic_services.filter_published_topic_ids([
self.TOPIC_ID, 'invalid_id'])
self.assertEqual(len(published_topic_ids), 0)
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': 'skill_1'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin)
published_topic_ids = topic_services.filter_published_topic_ids([
self.TOPIC_ID, 'invalid_id'])
self.assertEqual(len(published_topic_ids), 1)
self.assertEqual(published_topic_ids[0], self.TOPIC_ID)
def test_publish_and_unpublish_topic(self):
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertFalse(topic_rights.topic_is_published)
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': 'skill_1'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin)
with self.assertRaisesRegexp(
Exception,
'The user does not have enough rights to unpublish the topic.'):
topic_services.unpublish_topic(self.TOPIC_ID, self.user_id_a)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_rights.topic_is_published)
topic_services.unpublish_topic(self.TOPIC_ID, self.user_id_admin)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertFalse(topic_rights.topic_is_published)
with self.assertRaisesRegexp(
Exception,
'The user does not have enough rights to publish the topic.'):
topic_services.publish_topic(self.TOPIC_ID, self.user_id_a)
def test_create_new_topic_rights(self):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
def test_non_admin_cannot_assign_roles(self):
with self.assertRaisesRegexp(
Exception,
'UnauthorizedUserException: Could not assign new role.'):
topic_services.assign_role(
self.user_b, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertFalse(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
def test_role_cannot_be_assigned_to_non_topic_manager(self):
with self.assertRaisesRegexp(
Exception,
'The assignee doesn\'t have enough rights to become a manager.'):
topic_services.assign_role(
self.user_admin, self.user_b,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
def test_manager_cannot_assign_roles(self):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
with self.assertRaisesRegexp(
Exception,
'UnauthorizedUserException: Could not assign new role.'):
topic_services.assign_role(
self.user_a, self.user_b,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
def test_get_all_topic_rights_of_user(self):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights_with_user(self.user_id_a)
self.assertEqual(len(topic_rights), 1)
self.assertEqual(topic_rights[0].id, self.TOPIC_ID)
self.assertEqual(topic_rights[0].manager_ids, [self.user_id_a])
def test_cannot_save_new_topic_with_existing_name(self):
with self.assertRaisesRegexp(
Exception, 'Topic with name \'Name\' already exists'):
self.save_new_topic(
'topic_2', self.user_id, name='Name',
abbreviated_name='abbrev', thumbnail_filename=None,
description='Description 2', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
def test_update_topic_language_code(self):
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(topic.language_code, 'en')
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id, self.TOPIC_ID, changelist, 'Change language code')
topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(topic.language_code, 'bn')
def test_cannot_update_topic_and_subtopic_pages_with_empty_changelist(self):
with self.assertRaisesRegexp(
Exception,
'Unexpected error: received an invalid change list when trying to '
'save topic'):
topic_services.update_topic_and_subtopic_pages(
self.user_id, self.TOPIC_ID, [], 'commit message')
def test_cannot_update_topic_and_subtopic_pages_with_mismatch_of_versions(
self):
topic_model = topic_models.TopicModel.get(self.TOPIC_ID)
topic_model.version = 0
topic_model.commit(self.user_id, 'changed version', [])
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY,
'property_name': topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})]
with self.assertRaisesRegexp(
Exception,
'Unexpected error: trying to update version 1 of topic '
'from version 2. Please reload the page and try again.'):
topic_services.update_topic_and_subtopic_pages(
self.user_id, self.TOPIC_ID, changelist, 'change language_code')
topic_model = topic_models.TopicModel.get(self.TOPIC_ID)
topic_model.version = 100
topic_model.commit(self.user_id, 'changed version', [])
with self.assertRaisesRegexp(
Exception,
'Trying to update version 101 of topic from version 2, '
'which is too old. Please reload the page and try again.'):
topic_services.update_topic_and_subtopic_pages(
self.user_id, self.TOPIC_ID, changelist, 'change language_code')
def test_cannot_update_topic_and_subtopic_pages_with_empty_commit_message(
self):
with self.assertRaisesRegexp(
Exception, 'Expected a commit message, received none.'):
topic_services.update_topic_and_subtopic_pages(
self.user_id, self.TOPIC_ID, [], None)
def test_cannot_publish_topic_with_no_topic_rights(self):
with self.assertRaisesRegexp(
Exception, 'The given topic does not exist'):
topic_services.publish_topic('invalid_topic_id', self.user_id_admin)
def test_cannot_publish_a_published_topic(self):
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertFalse(topic_rights.topic_is_published)
changelist = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'old_subtopic_id': None,
'new_subtopic_id': self.subtopic_id,
'skill_id': 'skill_1'
})]
topic_services.update_topic_and_subtopic_pages(
self.user_id_admin, self.TOPIC_ID, changelist,
'Updated subtopic skill ids.')
topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_rights.topic_is_published)
with self.assertRaisesRegexp(
Exception, 'The topic is already published.'):
topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin)
def test_cannot_unpublish_topic_with_no_topic_rights(self):
with self.assertRaisesRegexp(
Exception, 'The given topic does not exist'):
topic_services.unpublish_topic(
'invalid_topic_id', self.user_id_admin)
def test_cannot_unpublish_an_unpublished_topic(self):
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertFalse(topic_rights.topic_is_published)
with self.assertRaisesRegexp(
Exception, 'The topic is already unpublished.'):
topic_services.unpublish_topic(self.TOPIC_ID, self.user_id_admin)
def test_cannot_edit_topic_with_no_topic_rights(self):
self.assertFalse(topic_services.check_can_edit_topic(self.user_a, None))
def test_cannot_assign_role_with_invalid_role(self):
with self.assertRaisesRegexp(Exception, 'Invalid role'):
topic_services.assign_role(
self.user_admin, self.user_a, 'invalid_role', self.TOPIC_ID)
def test_deassign_user_from_all_topics(self):
self.save_new_topic(
'topic_2', self.user_id, name='Name 2',
abbreviated_name='abbrev', thumbnail_filename=None,
description='Description 2', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
self.save_new_topic(
'topic_3', self.user_id, name='Name 3',
abbreviated_name='abbrev', thumbnail_filename=None,
description='Description 3', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, 'topic_2')
topic_rights = topic_services.get_topic_rights_with_user(self.user_id_a)
self.assertEqual(len(topic_rights), 2)
topic_services.deassign_user_from_all_topics(
self.user_admin, self.user_id_a)
topic_rights = topic_services.get_topic_rights_with_user(self.user_id_a)
self.assertEqual(len(topic_rights), 0)
def test_reassigning_manager_role_to_same_user(self):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
with self.assertRaisesRegexp(
Exception, 'This user already is a manager for this topic'):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
def test_deassigning_manager_role(self):
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_MANAGER, self.TOPIC_ID)
topic_rights = topic_services.get_topic_rights(self.TOPIC_ID)
self.assertTrue(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_NONE, self.TOPIC_ID)
self.assertFalse(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
topic_services.assign_role(
self.user_admin, self.user_a,
topic_domain.ROLE_NONE, self.TOPIC_ID)
self.assertFalse(topic_services.check_can_edit_topic(
self.user_a, topic_rights))
self.assertFalse(topic_services.check_can_edit_topic(
self.user_b, topic_rights))
# TODO(lilithxxx): Remove this mock class and the SubtopicMigrationTests class
# once the actual functions for subtopic migrations are implemented.
# See issue: https://github.com/oppia/oppia/issues/7009.
class MockTopicObject(topic_domain.Topic):
"""Mocks Topic domain object."""
@classmethod
def _convert_subtopic_v1_dict_to_v2_dict(cls, subtopic):
"""Converts v1 subtopic dict to v2."""
return subtopic
@classmethod
def _convert_story_reference_v1_dict_to_v2_dict(cls, story_reference):
"""Converts v1 story reference dict to v2."""
return story_reference
class SubtopicMigrationTests(test_utils.GenericTestBase):
def test_migrate_subtopic_to_latest_schema(self):
topic_services.create_new_topic_rights('topic_id', 'user_id_admin')
commit_cmd = topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': 'name'
})
subtopic_dict = {
'id': 1,
'title': 'subtopic_title',
'skill_ids': []
}
model = topic_models.TopicModel(
id='topic_id',
name='name',
abbreviated_name='abbrev',
canonical_name='Name',
next_subtopic_id=1,
language_code='en',
subtopics=[subtopic_dict],
subtopic_schema_version=1,
story_reference_schema_version=1
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
'user_id_admin', 'topic model created', commit_cmd_dicts)
swap_topic_object = self.swap(topic_domain, 'Topic', MockTopicObject)
current_schema_version_swap = self.swap(
feconf, 'CURRENT_SUBTOPIC_SCHEMA_VERSION', 2)
with swap_topic_object, current_schema_version_swap:
topic = topic_fetchers.get_topic_from_model(model)
self.assertEqual(topic.subtopic_schema_version, 2)
self.assertEqual(topic.name, 'name')
self.assertEqual(topic.canonical_name, 'name')
self.assertEqual(topic.next_subtopic_id, 1)
self.assertEqual(topic.language_code, 'en')
self.assertEqual(len(topic.subtopics), 1)
self.assertEqual(topic.subtopics[0].to_dict(), subtopic_dict)
class StoryReferenceMigrationTests(test_utils.GenericTestBase):
def test_migrate_story_reference_to_latest_schema(self):
topic_services.create_new_topic_rights('topic_id', 'user_id_admin')
commit_cmd = topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': 'name'
})
story_reference_dict = {
'story_id': 'story_id',
'story_is_published': False
}
model = topic_models.TopicModel(
id='topic_id',
name='name',
abbreviated_name='abbrev',
canonical_name='Name',
next_subtopic_id=1,
language_code='en',
subtopics=[],
subtopic_schema_version=1,
story_reference_schema_version=1,
canonical_story_references=[story_reference_dict]
)
commit_cmd_dicts = [commit_cmd.to_dict()]
model.commit(
'user_id_admin', 'topic model created', commit_cmd_dicts)
swap_topic_object = self.swap(topic_domain, 'Topic', MockTopicObject)
current_schema_version_swap = self.swap(
feconf, 'CURRENT_STORY_REFERENCE_SCHEMA_VERSION', 2)
with swap_topic_object, current_schema_version_swap:
topic = topic_fetchers.get_topic_from_model(model)
self.assertEqual(topic.story_reference_schema_version, 2)
self.assertEqual(topic.name, 'name')
self.assertEqual(topic.canonical_name, 'name')
self.assertEqual(topic.next_subtopic_id, 1)
self.assertEqual(topic.language_code, 'en')
self.assertEqual(len(topic.canonical_story_references), 1)
self.assertEqual(
topic.canonical_story_references[0].to_dict(), story_reference_dict)
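# Note added for clarity (not part of the original tests): both migration test
# classes above follow the same pattern -- commit a TopicModel at schema
# version 1, swap topic_domain.Topic for MockTopicObject (whose
# _convert_*_v1_dict_to_v2_dict stubs pass dicts through unchanged), bump the
# corresponding CURRENT_*_SCHEMA_VERSION constant in feconf to 2, and then
# assert that topic_fetchers.get_topic_from_model() reports the migrated
# schema version while the dict contents stay intact.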
| 43.461091
| 80
| 0.645175
|
795043f7d3025281eea1e6c9f7a2acc1c46ca269
| 1,043
|
py
|
Python
|
src/mtenv/tests/examples/wrapped_bandit_test.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | 56
|
2021-02-11T19:15:17.000Z
|
2022-03-03T02:58:18.000Z
|
src/mtenv/tests/examples/wrapped_bandit_test.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | 1
|
2021-04-12T11:20:16.000Z
|
2021-04-20T22:53:28.000Z
|
src/mtenv/tests/examples/wrapped_bandit_test.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | 3
|
2021-02-27T11:46:16.000Z
|
2021-11-06T13:59:40.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from gym import spaces
from examples.bandit import BanditEnv # noqa: E402
from examples.wrapped_bandit import MTBanditWrapper # noqa: E402
from tests.utils.utils import validate_mtenv
def get_valid_n_arms() -> List[int]:
return [1, 10, 100]
def get_invalid_n_arms() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_arms", get_valid_n_arms())
def test_ntasks_id_wrapper_with_valid_input(n_arms):
env = MTBanditWrapper(
env=BanditEnv(n_arms),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_arms", get_invalid_n_arms())
def test_ntasks_id_wrapper_with_invalid_input(n_arms):
with pytest.raises(Exception):
env = MTBanditWrapper(
env=BanditEnv(n_arms),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
validate_mtenv(env=env)
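# Note added for clarity (not part of the original module): pytest.mark.parametrize
# turns every value from get_valid_n_arms()/get_invalid_n_arms() into its own
# test case. For the invalid arms (-1 and 0) the test only requires that some
# exception is raised inside the with-block, whether it comes from BanditEnv,
# the Box space construction, or validate_mtenv.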
| 26.74359
| 82
| 0.712368
|
795045dfa933cfc06c5e6a09735294df64ac4b91
| 15,101
|
py
|
Python
|
sciibo/tests/test_ai.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 14
|
2017-06-16T14:16:57.000Z
|
2021-02-26T13:53:56.000Z
|
sciibo/tests/test_ai.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 1
|
2018-06-27T16:11:48.000Z
|
2019-01-23T12:02:17.000Z
|
sciibo/tests/test_ai.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | null | null | null |
import unittest
from sciibo.bot import ai
from sciibo.core.helpers import nextcard, fitson
def print_layout(stock=None, discards=None, hand=None, builds=None):
layout = "" \
" __ __ __ __ \n" \
" |xx| |xx| |xx| |xx| \n" \
" |__| |__| |__| |__| \n" \
" __ __ __ __ __ __ __ __ __ __ \n" \
"|xx| |xx| |xx| |xx| |xx| |xx|xx|xx|xx|xx|\n" \
"|__| |__| |__| |__| |__| |__|__|__|__|__|"
layout = layout.replace('xx', '%2s')
values = []
for n in range(4):
try:
values.append(builds[n])
except:
values.append('')
values.append(stock or '')
for n in range(4):
try:
values.append(discards[n][0])
except:
values.append('')
for n in range(5):
try:
values.append(hand[n])
except:
values.append('')
print(layout % tuple(values))
for y in range(1, max(len(cards) for cards in discards)):
line1 = " " * 5
line2 = line1
for n in range(4):
try:
line1 += " |%2s|" % discards[n][y]
line2 += " |__|"
except:
line1 += " "
line2 += " "
print(line1)
print(line2)
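# Reading aid (comment added later, not part of the original helper): the
# template in print_layout() puts the four build piles on the top row, then the
# stock card, the four discard-pile tops and up to five hand cards on the
# second row; the trailing loop prints any deeper discard cards below their
# piles, one extra row per depth level.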
def print_moves(moves):
if not moves:
print("No moves possible")
return
for value, source, target in moves:
print("Move card with value %s from %s to %s" % (value, source, target))
class TestAI(unittest.TestCase):
"""
Helper methods
"""
def test_next_card(self):
self.assertEqual(nextcard(1), 2)
self.assertEqual(nextcard(2), 3)
self.assertEqual(nextcard(3), 4)
self.assertEqual(nextcard(4), 5)
self.assertEqual(nextcard(5), 6)
self.assertEqual(nextcard(6), 7)
self.assertEqual(nextcard(7), 8)
self.assertEqual(nextcard(8), 9)
self.assertEqual(nextcard(9), 10)
self.assertEqual(nextcard(10), 11)
self.assertEqual(nextcard(11), 12)
self.assertEqual(nextcard(12), 1)
def test_fitson(self):
self.assertTrue(fitson(1, 2))
self.assertTrue(fitson(6, 7))
self.assertTrue(fitson(12, 1))
self.assertTrue(fitson(1, 'SB'))
self.assertTrue(fitson(6, 'SB'))
self.assertTrue(fitson(12, 'SB'))
self.assertFalse(fitson(1, 1))
self.assertFalse(fitson(1, 3))
self.assertFalse(fitson(1, 12))
self.assertFalse(fitson(6, 6))
self.assertFalse(fitson(7, 6))
self.assertFalse(fitson(12, 12))
def test_enumerate_unique(self):
result = list(ai.enumerate_unique([1, 2, 2, 3, 4, 4]))
expected = [
(0, 1),
(1, 2),
(3, 3),
(4, 4),
]
self.assertEqual(result, expected)
def test_top_cards(self):
result = list(ai.top_cards([[1, 2, 3], [], [4, 5, 6], [7, 8, 9]]))
expected = [(0, 3), (2, 6), (3, 9)]
self.assertEqual(result, expected)
def test_pull(self):
result = ai.pull([4, 5, 6, 7, 8], 6)
expected = [4, 5, 7, 8]
self.assertEqual(result, expected)
def test_pull_top(self):
result = ai.pull_top([[1, 2, 3], [], [4, 5, 6], [7, 8, 9]], 2)
expected = [[1, 2, 3], [], [4, 5], [7, 8, 9]]
self.assertEqual(result, expected)
def test_place(self):
result = ai.place([1, 4, 7, 10, 12], 2, 9)
expected = [1, 4, 9, 10, 12]
self.assertEqual(result, expected)
"""
Best moves
"""
def test_best_moves(self):
# Play the hand with least SB and most hand cards
moves = [
# 1 SB, 1 hand cards
[
('SB', 'discard:0', 'build:0'),
(8, 'discard:0', 'build:0'),
(9, 'hand', 'build:0'),
(10, 'discard:1', 'build:0'),
],
# 1 SB, 2 hand cards
[
('SB', 'hand', 'build:0'),
(8, 'discard:0', 'build:0'),
(9, 'hand', 'build:0'),
(10, 'discard:1', 'build:0'),
],
# 2 SB, 2 hand cards
[
('SB', 'hand', 'build:0'),
('SB', 'discard:0', 'build:0'),
(9, 'hand', 'build:0'),
(10, 'discard:1', 'build:0'),
],
# 1 SB, 0 hand cards
[
('SB', 'discard:0', 'build:0'),
(8, 'discard:0', 'build:0'),
(9, 'discard:2', 'build:0'),
(10, 'discard:1', 'build:0'),
],
]
expected = moves[1]
result = ai.best_moves(moves)
self.assertEqual(result, expected)
"""
Lucky move
"""
def test_lucky_move(self):
# Stock before hand and discard
values = {
'stock': 5,
'discards': [[8], ['SB', 2, 5], [], []],
'hand': [9, 7, 12, 5, 'SB'],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), [(5, 'stock', 'build:3')])
# Hand number before hand SB and discard
values = {
'stock': 1,
'discards': [[8], ['SB', 2, 5], [], []],
'hand': [9, 7, 12, 5, 'SB'],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), [(5, 'hand', 'build:3')])
# Discard number before hand SB
values = {
'stock': 1,
'discards': [[8], ['SB', 2, 5], [], []],
'hand': [9, 7, 12, 'SB'],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), [(5, 'discard:1', 'build:3')])
# Hand SB before discard SB
values = {
'stock': 1,
'discards': [[8], [9, 'SB'], [], []],
'hand': [9, 7, 12, 'SB'],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), [('SB', 'hand', 'build:0')])
# Discard SB
values = {
'stock': 1,
'discards': [[8], [9, 'SB'], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), [('SB', 'discard:1', 'build:0')])
# No moves
values = {
'stock': 1,
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertEqual(ai.lucky_move(**values), None)
"""
Discard move
"""
def test_discard_move(self):
# Same number value
values = {
'discards': [[8], [5], [9], [12]],
'hand': [11, 9, 7, 4, 'SB'],
}
self.assertEqual(ai.discard_move(**values), [(9, 'hand', 'discard:2')])
# Empty discard pile
values = {
'discards': [[8], [5], [], [12]],
'hand': [11, 7, 4, 'SB'],
}
result = ai.discard_move(**values)
value, source, target = result[0] # First move
self.assertIn(value, values['hand'])
self.assertEqual(source, 'hand')
self.assertEqual(target, 'discard:2')
# Count down
values = {
'discards': [[8], [5], [9], [12]],
'hand': [11, 7, 4, 'SB'],
}
self.assertEqual(ai.discard_move(**values), [(7, 'hand', 'discard:0')])
# Same SB value
values = {
'discards': [[8], [5], ['SB'], []],
'hand': [12, 7, 4, 'SB'],
}
self.assertEqual(ai.discard_move(**values), [('SB', 'hand', 'discard:2')])
# Falls back to random choice, kind of difficult to test
"""
Any move
"""
def test_any_move(self):
# No moves
values = {
'stock': 1,
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertFalse(ai.any_move(**values))
# Number
values = {
'stock': 5,
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
values = {
'stock': 1,
'discards': [[8], [2, 4], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
values = {
'stock': 1,
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12, 3],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
# SB
values = {
'stock': 'SB',
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
values = {
'stock': 1,
'discards': [[8], [2, 'SB'], [], []],
'hand': [9, 7, 12],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
values = {
'stock': 1,
'discards': [[8], [2, 9], [], []],
'hand': [9, 7, 12, 'SB'],
'builds': [2, 3, 1, 4],
}
self.assertTrue(ai.any_move(**values))
"""
Stock moves
"""
def test_stock_non_greedy_hand(self):
# 3 must be played from discard instead of hand to release 12
values = {
'stock': 1,
'discards': [[12, 3], [7], [2], []],
'hand': [3, 9, 7, 10],
'builds': [11, 1, 6, 9],
}
expected = [
(2, 'discard:2', 'build:1'),
(3, 'discard:0', 'build:1'),
(12, 'discard:0', 'build:0'),
(1, 'stock', 'build:0'),
]
result = ai.stock_moves(**values)
self.assertEqual(result, expected)
"""
Most moves
"""
def test_most_number_over_sb(self):
# SB must not be played, does not help us
values = {
'discards': [['SB'], [6], [7], [8]],
'hand': [6, 8],
'builds': [1, 5, 12, 12],
}
expected = [
(6, 'hand', 'build:1'),
(7, 'discard:2', 'build:1'),
(8, 'hand', 'build:1'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
def test_most_clear(self):
# Clear hand
values = {
'discards': [[], [], [], []],
'hand': [6, 7, 8, 'SB', 2],
'builds': [5, 12, 12, 12],
}
expected = [
('SB', 'hand', 'build:1'),
(2, 'hand', 'build:1'),
(6, 'hand', 'build:0'),
(7, 'hand', 'build:0'),
(8, 'hand', 'build:0'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
def test_most_keep_sb(self):
# Don't play SB card
values = {
'discards': [[], [], [], []],
'hand': [6, 7, 8, 'SB', 3],
'builds': [5, 12, 12, 12],
}
expected = [
(6, 'hand', 'build:0'),
(7, 'hand', 'build:0'),
(8, 'hand', 'build:0'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
def test_most_sb_to_clear_hand(self):
# Play SB to clear hand
values = {
'discards': [[], [], [], []],
'hand': [6, 7, 8, 'SB'],
'builds': [5, 12, 12, 12],
}
expected = [
(6, 'hand', 'build:0'),
(7, 'hand', 'build:0'),
(8, 'hand', 'build:0'),
('SB', 'hand', 'build:0'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
def test_most_play_hand_sb_keep_discard_sb(self):
# Play SB to clear hand, but don't play SB in discards
values = {
'discards': [['SB'], [], [], []],
'hand': [6, 7, 8, 'SB'],
'builds': [5, 1, 12, 12],
}
expected = [
(6, 'hand', 'build:0'),
(7, 'hand', 'build:0'),
(8, 'hand', 'build:0'),
('SB', 'hand', 'build:0'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
def test_most_dont_waste_sb(self):
# Don't waste SB cards to clear hand
values = {
'discards': [[], [], [], []],
'hand': [2, 'SB', 'SB'],
'builds': [1, 2, 3, 4],
}
expected = [
(2, 'hand', 'build:0'),
]
result = ai.most_moves(**values)
self.assertEqual(result, expected)
"""
Calculate moves
"""
def test_calculate_terminates(self):
# Large calculations must terminate
values = {
'stock': 26,
'discards': [[7, 6, 5, 4, 2], [11, 11, 9, 8, 6], [11], [10]],
'hand': ['SB', 'SB', 'SB', 'SB', 'SB'],
'builds': [1, 2, 3, 4],
}
expected = [
(2, 'discard:0', 'build:0'),
]
result = ai.calculate_move(timeout=1, **values)
self.assertEqual(result, expected)
"""
Specific bugs
"""
def test_bug(self):
values = {
'stock': 1,
'discards': [[], [], [], []],
'hand': [3, 7, 2, 11, 12],
'builds': [12, 12, 12, 12],
}
expected = [
(1, 'stock', 'build:0'),
]
result = ai.calculate_move(**values)
self.assertEqual(result, expected)
def test_bug_1(self):
# Unexpected behavior:
# result: [('SB', 'hand', 'build:0')]
values = {
'stock': 6,
'discards': [[6], [7], [], []],
'hand': [8, 7, 12, 9, 'SB'],
'builds': [3, 12, 12, 12],
}
expected = [
(7, 'hand', 'discard:1'),
]
result = ai.calculate_move(**values)
self.assertEqual(result, expected)
def test_bug_2(self):
# Unexpected behavior:
# result: [('SB', 'hand', 'build:0')]
values = {
'stock': 7,
'discards': [[9], [], [], []],
'hand': [10, 'SB', 6],
'builds': [10, 2, 2, 1],
}
expected = [
[(6, 'hand', 'discard:1')],
[(10, 'hand', 'discard:1')],
]
result = ai.calculate_move(**values)
self.assertIn(result, expected)
def test_bug_3(self):
values = {
'stock': 12,
'discards': [[], [], [], []],
'hand': ['SB', 'SB', 9],
'builds': [4, 12, 12, 12],
}
expected = [
(9, 'hand', 'discard:0'),
]
result = ai.calculate_move(**values)
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
| 28.984645
| 83
| 0.417456
|
795045f34cc14295a4f5d5dd8810e2a630dc4eb4
| 837
|
py
|
Python
|
Assignment_5/Graph_Plotter.py
|
pkck28/Applied-Computation-in-Mechanical-Sciences
|
411d56faae2291fd056cd2f1a3e09e2854f84b49
|
[
"MIT"
] | null | null | null |
Assignment_5/Graph_Plotter.py
|
pkck28/Applied-Computation-in-Mechanical-Sciences
|
411d56faae2291fd056cd2f1a3e09e2854f84b49
|
[
"MIT"
] | null | null | null |
Assignment_5/Graph_Plotter.py
|
pkck28/Applied-Computation-in-Mechanical-Sciences
|
411d56faae2291fd056cd2f1a3e09e2854f84b49
|
[
"MIT"
] | null | null | null |
# Importing the required module
import pandas as pd
import matplotlib.pyplot as plt
# Reading Data from CSV File
data = pd.read_csv("/home/pavan/Desktop/ACMS/Assignment_5/GSS_Error.csv")
# Plotting graph for error variation
GSS_figure = plt.figure(1)
plt.plot(data.index,data["Ea"],'b')
plt.xlabel("Number of iteration")
plt.ylabel("Absolute Error after each iteration")
plt.title("Golden Section Search Method Error Variation")
plt.grid()
GSS_figure.show()
# Reading Data from CSV File
data = pd.read_csv("/home/pavan/Desktop/ACMS/Assignment_5/New_Error.csv")
# Plotting graph for error variation
NM_figure = plt.figure(2)
plt.plot(data.index,data["Ea"],'b')
plt.xlabel("Number of iteration")
plt.ylabel("Absolute Error after each iteration")
plt.title("Newton's Method Error Variation")
plt.grid()
NM_figure.show()
plt.show()
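# Possible refactor (sketch only, not part of the original assignment): the two
# plotting blocks above are nearly identical and could be driven by a loop over
# (csv_path, title) pairs. The paths are the same hard-coded ones used above;
# in practice a relative or configurable path would be more portable.
#
#   plots = [
#       ("/home/pavan/Desktop/ACMS/Assignment_5/GSS_Error.csv",
#        "Golden Section Search Method Error Variation"),
#       ("/home/pavan/Desktop/ACMS/Assignment_5/New_Error.csv",
#        "Newton's Method Error Variation"),
#   ]
#   for fig_no, (path, title) in enumerate(plots, start=1):
#       df = pd.read_csv(path)
#       plt.figure(fig_no)
#       plt.plot(df.index, df["Ea"], 'b')
#       plt.xlabel("Number of iterations")
#       plt.ylabel("Absolute error after each iteration")
#       plt.title(title)
#       plt.grid()
#   plt.show()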
| 28.862069
| 73
| 0.757467
|
795047528e25d1c1ca7338ed67ea346c26db6941
| 709
|
py
|
Python
|
src/google/appengine/api/runtime/__init__.py
|
phil-lopreiato/appengine-python-standard
|
5e2c400a24d299bb86e98f755a6ef510b4e1e0df
|
[
"Apache-2.0"
] | 28
|
2021-01-06T19:55:21.000Z
|
2022-03-28T09:41:08.000Z
|
src/google/appengine/api/runtime/__init__.py
|
SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard
|
530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc
|
[
"Apache-2.0"
] | 13
|
2021-06-17T09:38:17.000Z
|
2022-03-11T01:12:33.000Z
|
src/google/appengine/api/runtime/__init__.py
|
SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard
|
530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc
|
[
"Apache-2.0"
] | 28
|
2021-03-09T19:27:37.000Z
|
2022-01-21T21:18:52.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runtime Utilities API.
.. deprecated:: 1.8.1
"""
from google.appengine.api.runtime.runtime import *
| 26.259259
| 74
| 0.7433
|
795047ae1cf7c59c07e8dd149ebe8cfc3286b0db
| 78,921
|
py
|
Python
|
snakemake/dag.py
|
scholer/snakemake
|
99de496322f4813fea590ee50607be8042f176d5
|
[
"MIT"
] | null | null | null |
snakemake/dag.py
|
scholer/snakemake
|
99de496322f4813fea590ee50607be8042f176d5
|
[
"MIT"
] | null | null | null |
snakemake/dag.py
|
scholer/snakemake
|
99de496322f4813fea590ee50607be8042f176d5
|
[
"MIT"
] | null | null | null |
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter, deque, namedtuple
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged, IOFile
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
PotentialDependency = namedtuple("PotentialDependency", ["file", "jobs", "known"])
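# Illustration added as a sketch (not part of the original module): the Batch
# class below sorts the items, gives each batch floor(len(items) / batches) of
# them, and lets the final batch absorb the remainder. With five input files
# and two batches ("myrule" is just a placeholder rule name):
#
#   Batch("myrule", 1, 2).get_batch(["e", "d", "c", "b", "a"])  # -> ["a", "b"]
#   Batch("myrule", 2, 2).get_batch(["e", "d", "c", "b", "a"])  # -> ["c", "d", "e"]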
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
raise WorkflowError(
"Batching rule {} has less input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
# self.batch is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self._n_until_ready = defaultdict(int)
self._running = set()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress, create_inventory=True)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(
self.file2jobs(file),
file=file,
progress=progress,
create_inventory=True,
)
self.targetjobs.add(job)
self.cleanup()
self.check_incomplete()
self.update_needrun(create_inventory=True)
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{(os.path.abspath(f), job) for job in self.jobs for f in job.output}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.bfs(self.dependencies, *self.targetjobs))
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
try:
self._needrun.remove(job)
except KeyError:
pass
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
Returns None if the job is not incomplete, if no external jobid has been
registered, or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)),
list(self.jobs),
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
return self.dependencies.keys()
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
return filterfalse(self.finished, self._needrun)
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
return filter(self.finished, self.jobs)
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
finished).
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
return set(*self.depending[job].values())
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
raise MissingOutputException(
str(e) + "\nThis might be due to "
"filesystem latency. If that is the case, consider to increase the "
"wait time with --latency-wait."
+ "\nJob id: {jobid}".format(jobid=job.jobid),
rule=job.rule,
jobid=self.jobid(job),
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa). We can't check for remote objects
for f in expanded_output:
if (f.is_directory and not f.remote_object and not os.path.isdir(f)) or (
not f.remote_object and os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = job.expanded_output
if job.benchmark:
files = chain(job.expanded_output, (job.benchmark,))
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime.remote()
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
if not any(f.is_remote for f in job.input):
return
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local copy of remote file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self,
jobs,
file=None,
visited=None,
known_producers=None,
skip_until_dynamic=False,
progress=False,
create_inventory=False,
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
if known_producers is None:
known_producers = dict()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
known_producers=known_producers,
skip_until_dynamic=skip_until_dynamic,
progress=progress,
create_inventory=create_inventory,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(
self,
job,
visited=None,
known_producers=None,
skip_until_dynamic=False,
progress=False,
create_inventory=False,
):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
if known_producers is None:
known_producers = dict()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(
job, known_producers=known_producers
)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for res in potential_dependencies:
if create_inventory:
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
res.file.inventory()
if not res.jobs:
# no producing job found
if not res.file.exists:
# file not found, hence missing input
missing_input.add(res.file)
known_producers[res.file] = None
# file found, no problem
continue
if res.known:
producer[res.file] = res.jobs[0]
else:
try:
selected_job = self.update(
res.jobs,
file=res.file,
visited=visited,
known_producers=known_producers,
skip_until_dynamic=skip_until_dynamic
or res.file in job.dynamic_input,
progress=progress,
)
producer[res.file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not res.file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=res.file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
known_producers[res.file] = None
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any((f not in producer and not f.exists) for f in job.input):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self, create_inventory=False):
""" Update the information whether a job needs to be executed. """
if create_inventory:
# Concurrently collect mtimes of all existing files.
self.workflow.iocache.mtime_inventory(self.jobs)
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
f for f in job.input if not f.exists
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
files = set(job.output)
if job.benchmark:
files.add(job.benchmark)
else:
files = set(chain(*self.depending[job].values()))
if self.targetfiles:
files.update(
f
for f in chain(job.output, job.log)
if f in self.targetfiles
)
if job.benchmark and job.benchmark in self.targetfiles:
files.add(job.benchmark)
reason.missing_output.update(job.missing_output(files))
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_n_until_ready = self._n_until_ready
_needrun.clear()
_n_until_ready.clear()
self._ready_jobs.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = deque(filter(reason, candidates))
visited = set(queue)
candidates_set = set(candidates)
while queue:
job = queue.popleft()
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = list(job_.missing_output(files))
reason(job_).missing_output.update(missing_output)
if missing_output and job_ not in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates_set:
if job_ not in visited:
# TODO may it happen that order determines whether
# _n_until_ready is incremented for this job?
if all(f.is_ancient for f in files):
# No other reason to run job_.
# Since all files are ancient, we do not trigger it.
continue
visited.add(job_)
queue.append(job_)
_n_until_ready[job_] += 1
reason(job_).updated_input_run.update(files)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
def until_jobs(self):
"""Returns a generator of jobs specified by untiljobs."""
return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self):
"""Returns a generator of jobs specified by omitfromjobs."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
# Since groups might have been merged, we need
# to update each job j in group.
groups[j] = group
self._group = groups
self._update_group_components()
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
potential_new_ready_jobs = False
candidate_groups = set()
for job in jobs:
if job in self._ready_jobs or job in self._running:
# job has been seen before or is running, no need to process again
continue
if not self.finished(job) and self._ready(job):
potential_new_ready_jobs = True
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
return potential_new_ready_jobs
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self, update_needrun=True):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.cleanup()
self.update_jobids()
if update_needrun:
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
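# Note added for clarity (sketch, not original code): handle_pipes() below
# checks that every output flagged with pipe() has exactly one consuming job
# and then forces producer and consumer into the same group so they execute
# together. Schematically, for Snakemake rules like
#
#   rule a: output: pipe("x.txt") ...
#   rule b: input: "x.txt" ...
#
# both jobs end up with job.group set either to a group one of them already
# declared or to a fresh str(uuid.uuid4()).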
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
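# Worked example for _ready() below (added as a sketch, not original code):
# suppose group G = {A, B}, where B depends on A (inside G) and on C (outside
# G), and all three jobs need to run. update_needrun() then leaves
# _n_until_ready[A] == 0 and _n_until_ready[B] == 2, while n_internal_deps(B)
# is 1, so G is not ready (2 - 1 != 0). Once C finishes, finish() decrements
# _n_until_ready[B] to 1; now 1 - 1 == 0 for B and 0 - 0 == 0 for A, so the
# whole group becomes ready at once.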
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
return self._n_until_ready[job] == 0
else:
n_internal_deps = lambda job: sum(
self._group.get(dep) == group for dep in self.dependencies[job]
)
return all(
(self._n_until_ready[job] - n_internal_deps(job)) == 0 for job in group
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {}.".format(j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
self.postprocess()
return updated
def register_running(self, jobs):
self._running.update(jobs)
self._ready_jobs -= jobs
for job in jobs:
try:
del self._n_until_ready[job]
except KeyError:
# already gone
pass
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
self._running.remove(job)
# turn off this job's Reason
self.reason(job).mark_finished()
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
depending = [
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
]
if not updated_dag:
# Mark depending jobs as ready.
# Skip jobs that are marked as until jobs.
# This is not necessary if the DAG has been fully updated above.
for job in depending:
self._n_until_ready[job] -= 1
potential_new_ready_jobs = self.update_ready(depending)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
potential_new_ready_jobs = True
return potential_new_ready_jobs
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
if job in self._n_until_ready:
del self._n_until_ready[job]
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
jobid = self.jobid(job)
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
self._jobid[newjob] = jobid
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
def collect_potential_dependencies(self, job, known_producers):
"""Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered."""
# use a set to circumvent multiple jobs for the same file
# if user specified it twice
file2jobs = self.file2jobs
input_files = list(job.unique_input)
if self.is_batch_rule(job.rule):
# only consider the defined partition of the input files
input_batch = self.batch.get_batch(input_files)
if len(input_batch) != len(input_files):
logger.info(
"Considering only batch {} for DAG computation.\n"
"All jobs beyond the batching rule are omitted until the final batch.\n"
"Don't forget to run the other batches too.".format(self.batch)
)
input_files = input_batch
for file in input_files:
# omit the file if it comes from a subworkflow
if file in job.subworkflow_input:
continue
try:
yield PotentialDependency(file, known_producers[file], True)
except KeyError:
try:
if file in job.dependencies:
yield PotentialDependency(
file,
[self.new_job(job.dependencies[file], targetfile=file)],
False,
)
else:
yield PotentialDependency(file, file2jobs(file), False)
except MissingRuleException as ex:
# no dependency found
yield PotentialDependency(file, None, False)
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = deque(jobs)
visited = set(queue)
while queue:
job = queue.popleft()
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_ in direction[job].keys():
                if job_ not in visited:
queue.append(job_)
visited.add(job_)
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
                if job_ not in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
                if job_ not in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
        for job in jobs:
            # delegate to the local _dfs generator defined above
            for job_ in _dfs(job):
                yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
            edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime.local_or_remote()) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
        except BaseException as e:
os.remove(path)
raise e
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
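# --- Illustrative sketch (not part of the module above) -------------------------
# The bfs/level_bfs/dfs methods walk a mapping such as self.dependencies or
# self.depending, where each key points at the nodes reachable in one step.
# The minimal, hypothetical `toy_bfs`/`toy_deps` below mirror that traversal
# pattern on a plain dict so the visiting order is easy to inspect; they are
# illustrative only and not part of the DAG API.
if __name__ == "__main__":
    from collections import deque

    def toy_bfs(direction, *starts):
        """Yield nodes in breadth-first order, like bfs() over a plain dict."""
        queue = deque(starts)
        visited = set(queue)
        while queue:
            node = queue.popleft()
            yield node
            for nxt in direction.get(node, ()):
                if nxt not in visited:
                    visited.add(nxt)
                    queue.append(nxt)

    # "dependencies"-style mapping: each job names the jobs it needs.
    toy_deps = {"report": ["plot"], "plot": ["counts"], "counts": []}
    print(list(toy_bfs(toy_deps, "report")))  # ['report', 'plot', 'counts']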
| 37.297259
| 132
| 0.528681
|
79504808c893c374ddc7bef3ad2b52f5959d8d02
| 1,797
|
py
|
Python
|
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/python.py
|
InterestingLab/dolphinscheduler
|
2ff76c7e7330c166fba1e514aba6ddf2dbe74f57
|
[
"Apache-2.0"
] | 2,086
|
2021-04-15T20:28:24.000Z
|
2022-03-31T22:30:49.000Z
|
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/python.py
|
InterestingLab/dolphinscheduler
|
2ff76c7e7330c166fba1e514aba6ddf2dbe74f57
|
[
"Apache-2.0"
] | 3,789
|
2021-04-15T16:00:32.000Z
|
2022-03-31T13:38:53.000Z
|
dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/python.py
|
InterestingLab/dolphinscheduler
|
2ff76c7e7330c166fba1e514aba6ddf2dbe74f57
|
[
"Apache-2.0"
] | 1,170
|
2021-04-16T06:40:24.000Z
|
2022-03-31T22:30:51.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Python."""
import inspect
import types
from typing import Any
from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task
from pydolphinscheduler.exceptions import PyDSParamException
class Python(Task):
"""Task Python object, declare behavior for Python task to dolphinscheduler."""
_task_custom_attr = {
"raw_script",
}
def __init__(self, name: str, code: Any, *args, **kwargs):
super().__init__(name, TaskType.PYTHON, *args, **kwargs)
self._code = code
@property
def raw_script(self) -> str:
"""Get python task define attribute `raw_script`."""
if isinstance(self._code, str):
return self._code
elif isinstance(self._code, types.FunctionType):
py_function = inspect.getsource(self._code)
return py_function
        else:
            raise PyDSParamException(
                "Parameter code does not support type %s for now." % type(self._code)
            )
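
# --- Illustrative sketch (not part of the library) ------------------------------
# raw_script above accepts either a ready-made script string or a plain Python
# function, whose source is captured with inspect.getsource(). The hypothetical
# resolve_code()/sample_task() below reproduce only that dispatch in standalone
# form; they are illustrative and not part of the pydolphinscheduler API.
if __name__ == "__main__":
    import inspect
    import types

    def resolve_code(code):
        """Mirror the str / FunctionType branch used by Python.raw_script."""
        if isinstance(code, str):
            return code
        if isinstance(code, types.FunctionType):
            return inspect.getsource(code)
        raise TypeError("code must be a str or a function, got %s" % type(code))

    def sample_task():
        print("hello from a task function")

    print(resolve_code("print('inline script')"))  # returned unchanged
    print(resolve_code(sample_task))               # source text of sample_task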
| 34.557692
| 83
| 0.705064
|
79504898c59d1869aade50ce83cb130522a806d4
| 1,199
|
py
|
Python
|
demo/config.py
|
jpoikela/sappho
|
58f72b6d04c1d0ec511026473f4475eed15c9bd5
|
[
"MIT"
] | 7
|
2016-08-19T23:31:02.000Z
|
2017-03-30T11:58:42.000Z
|
demo/config.py
|
jpoikela/sappho
|
58f72b6d04c1d0ec511026473f4475eed15c9bd5
|
[
"MIT"
] | 45
|
2016-07-26T19:48:09.000Z
|
2016-12-10T22:50:14.000Z
|
demo/config.py
|
lily-seabreeze/sappho
|
58f72b6d04c1d0ec511026473f4475eed15c9bd5
|
[
"MIT"
] | 1
|
2018-11-29T14:16:59.000Z
|
2018-11-29T14:16:59.000Z
|
"""Configuration constants for Sappho demo."""
import pkg_resources
# Constants/game config
# The path to the file that's being used to represent the player
ANIMATED_SPRITE_PATH = pkg_resources.resource_filename("test_scene", "test.gif")
# The path to the file being used as the tilesheet
TILESHEET_PATH = pkg_resources.resource_filename("test_scene", "tilesheet.png")
# The Tiled Map Editor file which the player explores
TMX_PATH = pkg_resources.resource_filename("test_scene", "test.tmx")
MAX_SPEED = 2
RESOLUTION = [700, 500]
"""tuple(int, int): This demo will be ran in a window of the
dimensions (x, y) pixels (width, height).
"""
VIEWPORT = (80, 80)
"""tuple(int, int): ..."""
WINDOW_TITLE = "Sappho Engine Test"
"""str: The title of the window running the demo.
The text which appears in the titlebar of the window.
"""
ANIMATED_SPRITE_Z_INDEX = 0
"""int: The layer the player's sprite will be rendered on.
0 is farthest back, and higher numbers increase toward the
foreground. The number of layers will correspond with the
map that's being loaded.
"""
START_POSITION = (10, 10)
"""tuple(int, int): The absolute pixel coordinate
of the player's starting position on the map.
"""
| 27.883721
| 80
| 0.744787
|
795048bb8fdc99b2d38eeda188d4301803ce61c7
| 1,014
|
py
|
Python
|
tutorials/true_model_mpc/tutorial_one.py
|
wangsd01/blackbox_mpc
|
7876dee1bd85bde310e88741f5c63e3f7bd93916
|
[
"MIT"
] | 29
|
2020-10-20T08:14:45.000Z
|
2022-02-01T13:43:13.000Z
|
tutorials/true_model_mpc/tutorial_one.py
|
wangsd01/blackbox_mpc
|
7876dee1bd85bde310e88741f5c63e3f7bd93916
|
[
"MIT"
] | 3
|
2020-11-27T13:25:08.000Z
|
2021-12-12T04:30:41.000Z
|
tutorials/true_model_mpc/tutorial_one.py
|
wangsd01/blackbox_mpc
|
7876dee1bd85bde310e88741f5c63e3f7bd93916
|
[
"MIT"
] | 3
|
2021-04-15T14:23:41.000Z
|
2022-03-28T05:43:29.000Z
|
"""
- instantiate an env for a pendulum
- instantiate an MPC controller using the true known analytical model
- define cost/reward functions as used in the openAI gym env.
- render the resulting MPC afterwards
"""
from blackbox_mpc.policies.mpc_policy import \
MPCPolicy
from blackbox_mpc.utils.pendulum import PendulumTrueModel, \
pendulum_reward_function
import gym
env = gym.make("Pendulum-v0")
mpc_policy = MPCPolicy(reward_function=pendulum_reward_function,
env_action_space=env.action_space,
env_observation_space=env.observation_space,
true_model=True,
dynamics_function=PendulumTrueModel(),
optimizer_name='RandomSearch',
num_agents=1)
current_obs = env.reset()
for t in range(200):
action_to_execute, expected_obs, expected_reward = mpc_policy.act(
current_obs, t)
current_obs, reward, _, info = env.step(action_to_execute)
env.render()
| 36.214286
| 70
| 0.68146
|
7950494fa540a3d73b3a5c1228a1b4be9ff19522
| 2,660
|
py
|
Python
|
app/htmlSectionsExtractor.py
|
daniel-julio-iglesias/sectionsextractor
|
3337d770bf39d06cd79e0ae6608889004e6ea8f6
|
[
"MIT"
] | null | null | null |
app/htmlSectionsExtractor.py
|
daniel-julio-iglesias/sectionsextractor
|
3337d770bf39d06cd79e0ae6608889004e6ea8f6
|
[
"MIT"
] | 4
|
2020-03-24T16:10:04.000Z
|
2022-03-08T21:09:17.000Z
|
app/htmlSectionsExtractor.py
|
daniel-julio-iglesias/sectionsextractor
|
3337d770bf39d06cd79e0ae6608889004e6ea8f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GNU+Health%2FFamilies
GNUHealthFamilies.html
https://en.wikibooks.org/wiki/GNU_Health/Families
exampleSoup.select('p')
exampleSoup.select('h1')
exampleSoup.select('h2')
"""
import os
import codecs
import math
from config import basedir
import requests, bs4
from app import http_proxy, https_proxy
class HTMLSectionsExtractor:
def __init__(self, trainingdir, inputdoc):
filename = trainingdir + inputdoc
# self.f = open(filename)
self.f = codecs.open(filename, 'r', 'iso8859-1')
# print(self.f.readline())
""" self.proxies = {
'http': 'http://user:pass@proxyAddress:proxyPort',
'https': 'http://user:pass@proxyAddress:proxyPort0',
}
"""
self.proxies = {
'http': http_proxy,
'https': https_proxy,
}
print('http_proxy: ', http_proxy)
# TODO: If behind proxy, set your proxy
# self.res = requests.get('https://en.wikibooks.org/wiki/GNU_Health/Families')
self.res = requests.get('https://en.wikibooks.org/wiki/GNU_Health/Families', proxies=self.proxies)
if self.res.status_code == requests.codes.ok:
# self.gnuHealthSoup = bs4.BeautifulSoup(self.res.text)
# self.gnuHealthSoup = bs4.BeautifulSoup(self.res.text, "html.parser")
self.gnuHealthSoup = bs4.BeautifulSoup(self.res.text, "lxml")
def extract(self):
# content = self.f.readline()
content = None
if not isinstance(self.gnuHealthSoup, bs4.BeautifulSoup):
return content
else:
content = ''
h1Elems = self.gnuHealthSoup.select('h1')
str(h1Elems[0])
content = content + ' ' + h1Elems[0].getText() # TypeError: unsupported operand type(s) for +: 'NoneType' and 'str'
h2Elems = self.gnuHealthSoup.select('h2')
str(h2Elems[0])
content = content + ' ' + h2Elems[0].getText()
pElems = self.gnuHealthSoup.select('p')
str(pElems[0])
content = content + ' ' + pElems[0].getText()
# content = self.gnuHealthSoup.get_text()
# print(content)
return content
def main():
# training_dir = os.path.join(basedir, 'app', 'static', 'app', 'app-kb', 'app-kb-train')
training_dir = os.path.join(basedir, 'app', 'static', 'app', 'app-kb', 'app-kb-train', '00010Preface')
training_dir = training_dir + os.sep
doc = '01.txt'
hse = HTMLSectionsExtractor(training_dir, doc)
print(hse.extract())
if __name__ == '__main__':
main()
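
# --- Illustrative sketch (not part of the app) -----------------------------------
# extract() above concatenates the text of the first <h1>, <h2> and <p> elements
# selected by BeautifulSoup. The hypothetical helper below runs the same selection
# on a small inline HTML string, so the parsing step can be tried without a
# network connection or proxy; the sample markup is made up. Call it manually,
# e.g. print(_demo_extract_sections()).
def _demo_extract_sections():
    import bs4

    sample_html = (
        "<html><body><h1>GNU Health</h1>"
        "<h2>Families</h2>"
        "<p>Family management in GNU Health.</p></body></html>"
    )
    soup = bs4.BeautifulSoup(sample_html, "html.parser")
    parts = [soup.select(tag)[0].getText() for tag in ("h1", "h2", "p")]
    return " ".join(parts)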
| 28.602151
| 125
| 0.611278
|
79504ae293a9e48f06cacfbea0be75c1633b27fe
| 2,189
|
py
|
Python
|
PWE_Diagnostic_Lattice_Tool/ASP_LogicProgram.py
|
idaks/PWE-Diagnostic-Lattice-Tool
|
b8b4823d7a3a350b1fd1341160a900337c4d7cd8
|
[
"MIT"
] | null | null | null |
PWE_Diagnostic_Lattice_Tool/ASP_LogicProgram.py
|
idaks/PWE-Diagnostic-Lattice-Tool
|
b8b4823d7a3a350b1fd1341160a900337c4d7cd8
|
[
"MIT"
] | null | null | null |
PWE_Diagnostic_Lattice_Tool/ASP_LogicProgram.py
|
idaks/PWE-Diagnostic-Lattice-Tool
|
b8b4823d7a3a350b1fd1341160a900337c4d7cd8
|
[
"MIT"
] | null | null | null |
from .LogicProgram import LogicProgram
from PW_explorer.run_clingo import run_clingo
from PW_explorer.run_dlv import run_dlv
from PW_explorer.load_worlds import load_worlds
from .LatticeNode import NodeAmbiguityType
class ASP_LogicProgram(LogicProgram):
def __init__(self, initial_encoding: str, constraint_keyword: str='comp', reasoner: str='clingo'):
"""
:param initial_encoding: Encoding w/o any of the constraints turned on.
:param constraint_keyword: The arity 1 relation name to use to turn constraints on.
:param reasoner: Reasoner to use. Choices: 'clingo' (default) or 'dlv'.
"""
LogicProgram.__init__(self)
self.encoding = initial_encoding
self.constraint_keyword = constraint_keyword
self.reasoner = reasoner
def run_reasoner(self, constraints: list, num_pws: int):
run_reasoner_func = {'clingo': run_clingo,
'dlv': run_dlv,
}[self.reasoner]
constraint_activations = "\n".join(["{}({}).".format(self.constraint_keyword, c) for c in set(constraints)])
map_soln, md = run_reasoner_func(self.encoding + '\n' + constraint_activations, num_solutions=num_pws)
pw_rel_dfs, rel_schemas, pws = load_worlds(map_soln, silent=True)
return pw_rel_dfs, rel_schemas, pws
def get_num_solutions(self, constraints: list):
_, _, pws = self.run_reasoner(constraints=constraints, num_pws=0)
return len(pws)
def check_sat(self, constraints):
_, _, pws = self.run_reasoner(constraints, num_pws=1)
return len(pws) >= 1
def check_ambiguity(self, constraints) -> NodeAmbiguityType:
"""
NodeAmbiguityType.unsat == 0 --> UNSAT (UNSAT)
NodeAmbiguityType.unambiguous == 1 --> UNAMBIGUOUS (SAT)
NodeAmbiguityType.ambiguous == 2 --> AMBIGUOUS (SAT)
"""
_, _, pws = self.run_reasoner(constraints, num_pws=2)
if len(pws) >= 2:
return NodeAmbiguityType.ambiguous
elif len(pws) == 1:
return NodeAmbiguityType.unambiguous
else:
return NodeAmbiguityType.unsat
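
# --- Illustrative sketch (not part of the tool) -----------------------------------
# The class above activates constraints by appending facts of the form
# `<constraint_keyword>(<name>).` to the initial encoding and then counting the
# resulting answer sets. The toy encoding and constraint name below are invented
# for illustration and assume a working clingo setup through PW_explorer; run as
# `python -m PWE_Diagnostic_Lattice_Tool.ASP_LogicProgram` because of the
# relative imports at the top of this module.
if __name__ == "__main__":
    toy_encoding = """
    1 { colour(red); colour(blue) } 1.
    :- comp(no_red), colour(red).
    """
    lp = ASP_LogicProgram(toy_encoding, constraint_keyword="comp", reasoner="clingo")
    print(lp.check_ambiguity([]))          # expected: NodeAmbiguityType.ambiguous
    print(lp.check_ambiguity(["no_red"]))  # expected: NodeAmbiguityType.unambiguous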
| 39.8
| 116
| 0.655094
|
79504bb215540ac7e388ee79c9965b53c49a36b0
| 156
|
py
|
Python
|
runbot.py
|
tkgwku/meiro-ilas-seminar-2017
|
5215aeb3c16c69726133303cdaff4d3fbd5916d3
|
[
"MIT"
] | null | null | null |
runbot.py
|
tkgwku/meiro-ilas-seminar-2017
|
5215aeb3c16c69726133303cdaff4d3fbd5916d3
|
[
"MIT"
] | null | null | null |
runbot.py
|
tkgwku/meiro-ilas-seminar-2017
|
5215aeb3c16c69726133303cdaff4d3fbd5916d3
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from slackbot.bot import Bot
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
print('start slackbot')
main()
| 13
| 28
| 0.596154
|
79504beac7c863d7e48b30cb1edac572ac9af7b4
| 3,293
|
py
|
Python
|
docs/examples/aggplot-collisions.py
|
lmmarsano/geoplot
|
77470728ddf726292bc84b2ed207d932d2069862
|
[
"MIT"
] | null | null | null |
docs/examples/aggplot-collisions.py
|
lmmarsano/geoplot
|
77470728ddf726292bc84b2ed207d932d2069862
|
[
"MIT"
] | null | null | null |
docs/examples/aggplot-collisions.py
|
lmmarsano/geoplot
|
77470728ddf726292bc84b2ed207d932d2069862
|
[
"MIT"
] | null | null | null |
# This example demonstrates how to use the `aggplot` function, using part of a NYC traffic collisions dataset.
#
# In the first plot we have a bunch of point data, but don't provide any geometry about the locations. So `aggplot`
# invents its own, partitioning the sample space into increasingly fine squares (known as a "quadtree"). Each of the
# squares has between approximately `nmin` and `nmax` samples inside, and the `agg` statistic (`np.mean` by default;
# `np.max` here) is applied to each square.
#
# It is relatively common for datasets to contain a field describing the "neighborhood" a particular observation is in.
# In that case you can use the `by` parameter to summarize by neighborhood (`ZIP CODE` in this example). The result is
# an aggregation using convex hulls: still not pretty, but functional.
#
# Finally there is the case that we have full geospatial context. In that case pass the geometry to the `geometry`
# field; `aggplot` will handle aggregating the result and generate a nicely packaged choropleth!
# Load the data (uses the `quilt` package).
from quilt.data.ResidentMario import nyc_collisions
import shapely
import geopandas as gpd
import numpy as np  # np.nan is used below, before the plotting imports
import pandas as pd
collisions = nyc_collisions.collisions()
def pointify(srs):
try:
lat, long = float(srs['LATITUDE']), float(srs['LONGITUDE'])
if pd.isnull(lat) or pd.isnull(long):
return shapely.geometry.Point((0, 0))
else:
return shapely.geometry.Point((long, lat))
except ValueError:
return shapely.geometry.Point((0, 0))
collisions = gpd.GeoDataFrame(collisions.head(5000), geometry=collisions.head(5000).apply(pointify, axis='columns'))
collisions = collisions[collisions.geometry.map(lambda srs: not (srs.x == 0))]
collisions['BOROUGH'] = collisions['BOROUGH'].str.strip().map(lambda v: np.nan if len(v) == 0 else v)
# Plot the data.
import geoplot as gplt
import geoplot.crs as gcrs
import numpy as np
import matplotlib.pyplot as plt
f, axarr = plt.subplots(3, 1, figsize=(12, 12), subplot_kw={
'projection': gcrs.AlbersEqualArea(central_latitude=40.7128, central_longitude=-74.0059)
})
plt.suptitle('Max(Injuries) in Collision by Area, 2016', fontsize=16)
plt.subplots_adjust(top=0.95)
ax1 = gplt.aggplot(collisions, projection=gcrs.AlbersEqualArea(),
hue='NUMBER OF PERSONS INJURED', agg=np.max, cmap='Reds',
nmin=100, nmax=500,
linewidth=0.5, edgecolor='white',
ax=axarr[0])
ax1.set_title("No Geometry (Quadtree)")
ax2 = gplt.aggplot(collisions, projection=gcrs.AlbersEqualArea(),
hue='NUMBER OF PERSONS INJURED', agg=np.max, cmap='Reds', by='ZIP CODE',
linewidth=0.5, edgecolor='white',
ax=axarr[1])
ax2.set_title("Categorical Geometry (Convex Hull)")
zip_codes = gplt.datasets.load('nyc-zip-codes')
ax3 = gplt.aggplot(collisions, projection=gcrs.AlbersEqualArea(),
hue='NUMBER OF PERSONS INJURED', agg=np.max, by='ZIP CODE', geometry=zip_codes.geometry,
cmap='Reds', linewidth=0.5, edgecolor='white',
ax=axarr[2])
ax3.set_title("Geometry Provided (Choropleth)")
plt.savefig("aggplot-collisions-1.png", bbox_inches='tight', pad_inches=0.1)
| 44.5
| 119
| 0.696022
|
79504c2079f89bdf3ec5c225cf4f0f9259ca5750
| 14,123
|
py
|
Python
|
src/lib/reportlab/graphics/charts/spider.py
|
tschalch/pyTray
|
ab26c7d4618c16f1ef6df45f7c8bae81f08f855b
|
[
"BSD-3-Clause"
] | 1
|
2018-06-15T09:42:22.000Z
|
2018-06-15T09:42:22.000Z
|
src/lib/reportlab/graphics/charts/spider.py
|
tschalch/pyTray
|
ab26c7d4618c16f1ef6df45f7c8bae81f08f855b
|
[
"BSD-3-Clause"
] | null | null | null |
src/lib/reportlab/graphics/charts/spider.py
|
tschalch/pyTray
|
ab26c7d4618c16f1ef6df45f7c8bae81f08f855b
|
[
"BSD-3-Clause"
] | null | null | null |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/spider.py
# spider chart, also known as radar chart
"""Spider Chart
Normal use shows variation of 5-10 parameters against some 'norm' or target.
When there is more than one series, place the series with the largest
numbers first, as it will be overdrawn by each successive one.
"""
__version__=''' $Id: spider.py,v 1.1 2006/05/26 19:19:40 thomas Exp $ '''
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors, isNumberOrNone,\
isNoneOrListOfNoneOrStrings, isTextAnchor,\
isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
                                     isStringOrNone, isAnything
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, Ellipse, \
Wedge, String, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.areas import PlotArea
from piecharts import WedgeLabel
from reportlab.graphics.widgets.markers import makeMarker, uSymbol2Symbol
class StrandProperties(PropHolder):
"""This holds descriptive information about concentric 'strands'.
Line style, whether filled etc.
"""
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber),
fillColor = AttrMapValue(isColorOrNone),
strokeColor = AttrMapValue(isColorOrNone),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
fontName = AttrMapValue(isString),
fontSize = AttrMapValue(isNumber),
fontColor = AttrMapValue(isColorOrNone),
labelRadius = AttrMapValue(isNumber),
markers = AttrMapValue(isBoolean),
markerType = AttrMapValue(isAnything),
markerSize = AttrMapValue(isNumber),
label_dx = AttrMapValue(isNumber),
label_dy = AttrMapValue(isNumber),
label_angle = AttrMapValue(isNumber),
label_boxAnchor = AttrMapValue(isBoxAnchor),
label_boxStrokeColor = AttrMapValue(isColorOrNone),
label_boxStrokeWidth = AttrMapValue(isNumber),
label_boxFillColor = AttrMapValue(isColorOrNone),
label_strokeColor = AttrMapValue(isColorOrNone),
label_strokeWidth = AttrMapValue(isNumber),
label_text = AttrMapValue(isStringOrNone),
label_leading = AttrMapValue(isNumberOrNone),
label_width = AttrMapValue(isNumberOrNone),
label_maxWidth = AttrMapValue(isNumberOrNone),
label_height = AttrMapValue(isNumberOrNone),
label_textAnchor = AttrMapValue(isTextAnchor),
label_visible = AttrMapValue(isBoolean,desc="True if the label is to be drawn"),
label_topPadding = AttrMapValue(isNumber,'padding at top of box'),
label_leftPadding = AttrMapValue(isNumber,'padding at left of box'),
label_rightPadding = AttrMapValue(isNumber,'padding at right of box'),
label_bottomPadding = AttrMapValue(isNumber,'padding at bottom of box'),
)
def __init__(self):
self.strokeWidth = 0
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.fontName = STATE_DEFAULTS["fontName"]
self.fontSize = STATE_DEFAULTS["fontSize"]
self.fontColor = STATE_DEFAULTS["fillColor"]
self.labelRadius = 1.2
self.markers = 0
self.markerType = None
self.markerSize = 0
self.label_dx = self.label_dy = self.label_angle = 0
self.label_text = None
self.label_topPadding = self.label_leftPadding = self.label_rightPadding = self.label_bottomPadding = 0
self.label_boxAnchor = 'c'
self.label_boxStrokeColor = None #boxStroke
self.label_boxStrokeWidth = 0.5 #boxStrokeWidth
self.label_boxFillColor = None
self.label_strokeColor = None
self.label_strokeWidth = 0.1
self.label_leading = self.label_width = self.label_maxWidth = self.label_height = None
self.label_textAnchor = 'start'
self.label_visible = 1
class SpiderChart(PlotArea):
_attrMap = AttrMap(BASE=PlotArea,
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
strands = AttrMapValue(None, desc="collection of strand descriptor objects"),
)
def __init__(self):
PlotArea.__init__(self)
self.data = [[10,12,14,16,14,12], [6,8,10,12,9,11]]
self.labels = None # or list of strings
self.startAngle = 90
self.direction = "clockwise"
self.strands = TypedPropertyCollection(StrandProperties)
self.strands[0].fillColor = colors.cornsilk
self.strands[1].fillColor = colors.cyan
def demo(self):
d = Drawing(200, 100)
sp = SpiderChart()
sp.x = 50
sp.y = 10
sp.width = 100
sp.height = 80
sp.data = [[10,12,14,16,18,20],[6,8,4,6,8,10]]
sp.labels = ['a','b','c','d','e','f']
d.add(sp)
return d
def normalizeData(self, outer = 0.0):
"""Turns data into normalized ones where each datum is < 1.0,
and 1.0 = maximum radius. Adds 10% at outside edge by default"""
data = self.data
theMax = 0.0
for row in data:
for element in row:
assert element >=0, "Cannot do spider plots of negative numbers!"
if element > theMax:
theMax = element
theMax = theMax * (1.0+outer)
scaled = []
for row in data:
scaledRow = []
for element in row:
scaledRow.append(element / theMax)
scaled.append(scaledRow)
return scaled
def draw(self):
# normalize slice data
g = self.makeBackground() or Group()
xradius = self.width/2.0
yradius = self.height/2.0
self._radius = radius = min(xradius, yradius)
centerx = self.x + xradius
centery = self.y + yradius
data = self.normalizeData()
n = len(data[0])
#labels
if self.labels is None:
labels = [''] * n
else:
labels = self.labels
            # there's no point in raising errors for too few labels if we
            # silently create them all for the extreme case of no labels.
i = n-len(labels)
if i>0:
labels = labels + ['']*i
spokes = []
csa = []
angle = self.startAngle*pi/180
        direction = -1 if self.direction == "clockwise" else 1
angleBetween = direction*(2 * pi)/n
markers = self.strands.markers
for i in xrange(n):
car = cos(angle)*radius
sar = sin(angle)*radius
csa.append((car,sar,angle))
spoke = Line(centerx, centery, centerx + car, centery + sar, strokeWidth = 0.5)
#print 'added spoke (%0.2f, %0.2f) -> (%0.2f, %0.2f)' % (spoke.x1, spoke.y1, spoke.x2, spoke.y2)
spokes.append(spoke)
if labels:
si = self.strands[i]
text = si.label_text
if text is None: text = labels[i]
if text:
labelRadius = si.labelRadius
L = WedgeLabel()
L.x = centerx + labelRadius*car
L.y = centery + labelRadius*sar
L.boxAnchor = si.label_boxAnchor
L._pmv = angle*180/pi
L.dx = si.label_dx
L.dy = si.label_dy
L.angle = si.label_angle
L.boxAnchor = si.label_boxAnchor
L.boxStrokeColor = si.label_boxStrokeColor
L.boxStrokeWidth = si.label_boxStrokeWidth
L.boxFillColor = si.label_boxFillColor
L.strokeColor = si.label_strokeColor
L.strokeWidth = si.label_strokeWidth
L._text = text
L.leading = si.label_leading
L.width = si.label_width
L.maxWidth = si.label_maxWidth
L.height = si.label_height
L.textAnchor = si.label_textAnchor
L.visible = si.label_visible
L.topPadding = si.label_topPadding
L.leftPadding = si.label_leftPadding
L.rightPadding = si.label_rightPadding
L.bottomPadding = si.label_bottomPadding
L.fontName = si.fontName
L.fontSize = si.fontSize
L.fillColor = si.fontColor
spokes.append(L)
angle = angle + angleBetween
# now plot the polygons
rowIdx = 0
for row in data:
# series plot
points = []
car, sar = csa[-1][:2]
r = row[-1]
points.append(centerx+car*r)
points.append(centery+sar*r)
for i in xrange(n):
car, sar = csa[i][:2]
r = row[i]
points.append(centerx+car*r)
points.append(centery+sar*r)
# make up the 'strand'
strand = Polygon(points)
strand.fillColor = self.strands[rowIdx].fillColor
strand.strokeColor = self.strands[rowIdx].strokeColor
strand.strokeWidth = self.strands[rowIdx].strokeWidth
strand.strokeDashArray = self.strands[rowIdx].strokeDashArray
g.add(strand)
# put in a marker, if it needs one
if markers:
if hasattr(self.strands[rowIdx], 'markerType'):
uSymbol = self.strands[rowIdx].markerType
elif hasattr(self.strands, 'markerType'):
uSymbol = self.strands.markerType
else:
uSymbol = None
m_x = centerx+car*r
m_y = centery+sar*r
m_size = self.strands[rowIdx].markerSize
m_fillColor = self.strands[rowIdx].fillColor
m_strokeColor = self.strands[rowIdx].strokeColor
m_strokeWidth = self.strands[rowIdx].strokeWidth
m_angle = 0
if type(uSymbol) is type(''):
symbol = makeMarker(uSymbol,
size = m_size,
x = m_x,
y = m_y,
fillColor = m_fillColor,
strokeColor = m_strokeColor,
strokeWidth = m_strokeWidth,
angle = m_angle,
)
else:
symbol = uSymbol2Symbol(uSymbol,m_x,m_y,m_fillColor)
for k,v in (('size', m_size), ('fillColor', m_fillColor),
('x', m_x), ('y', m_y),
('strokeColor',m_strokeColor), ('strokeWidth',m_strokeWidth),
('angle',m_angle),):
try:
setattr(uSymbol,k,v)
except:
pass
g.add(symbol)
rowIdx = rowIdx + 1
# spokes go over strands
for spoke in spokes:
g.add(spoke)
return g
def sample1():
"Make a simple spider chart"
d = Drawing(400, 400)
pc = SpiderChart()
pc.x = 50
pc.y = 50
pc.width = 300
pc.height = 300
pc.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8,3]]
pc.labels = ['a','b','c','d','e','f']
pc.strands[2].fillColor=colors.palegreen
d.add(pc)
return d
def sample2():
"Make a spider chart with markers, but no fill"
d = Drawing(400, 400)
pc = SpiderChart()
pc.x = 50
pc.y = 50
pc.width = 300
pc.height = 300
pc.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8,3]]
pc.labels = ['U','V','W','X','Y','Z']
pc.strands.strokeWidth = 2
pc.strands[0].fillColor = None
pc.strands[1].fillColor = None
pc.strands[2].fillColor = None
pc.strands[0].strokeColor = colors.red
pc.strands[1].strokeColor = colors.blue
pc.strands[2].strokeColor = colors.green
pc.strands.markers = 1
pc.strands.markerType = "FilledDiamond"
pc.strands.markerSize = 6
d.add(pc)
return d
if __name__=='__main__':
d = sample1()
from reportlab.graphics.renderPDF import drawToFile
drawToFile(d, 'spider.pdf')
d = sample2()
drawToFile(d, 'spider2.pdf')
#print 'saved spider.pdf'
| 39.89548
| 115
| 0.547971
|
79504cecc47c361158d211cf030837cc970e51f1
| 10,739
|
py
|
Python
|
msw/config/configs/spatial.py
|
takaratruong/minishapeworld
|
ee23712d59248c502f5b9247916a60853f21b508
|
[
"MIT"
] | 4
|
2020-04-04T23:25:14.000Z
|
2021-03-19T02:05:45.000Z
|
msw/config/configs/spatial.py
|
takaratruong/minishapeworld
|
ee23712d59248c502f5b9247916a60853f21b508
|
[
"MIT"
] | 2
|
2022-01-13T03:46:49.000Z
|
2022-03-12T00:58:20.000Z
|
msw/config/configs/spatial.py
|
takaratruong/minishapeworld
|
ee23712d59248c502f5b9247916a60853f21b508
|
[
"MIT"
] | 2
|
2020-07-03T01:41:27.000Z
|
2021-06-27T21:01:42.000Z
|
from collections import namedtuple
from enum import Enum
import numpy as np
from ... import color
from ... import constants as C
from ... import shape
from .. import spec
from .. import util as config_util
from . import configbase
_SpatialConfigBase = namedtuple("SpatialConfig", ["shapes", "relation", "dir"])
def matches(spc, shape_):
"""
Return True if the shape adheres to the spc (spc has optional color/shape
restrictions)
"""
(c, s) = spc
matches_color = c is None or (shape_.color == c)
matches_shape = s is None or (shape_.name == s)
return matches_color and matches_shape
def has_relation(d1, d2, relation, relation_dir):
if relation == 0:
if relation_dir == 0:
return d1.left(d2)
else:
return d1.right(d2)
else:
if relation_dir == 0:
return d1.below(d2)
else:
return d1.above(d2)
class SpatialConfig(configbase._ConfigBase, _SpatialConfigBase):
def matches_shapes(self, shape_):
"""
Return a list which contains 0 or 1 if the shape adheres to the match
"""
m = []
for i, spc in enumerate(self.shapes):
if matches(spc, shape_):
m.append(i)
return m
def does_not_validate(self, existing_shapes, new_shape):
"""
Check if a new shape has the config's spatial relation with any
existing shapes.
Only compares a single new shape with existing shapes O(n) rather than
pairwise comparisons among existing shapes (O(n^2)).
"""
for match_i in self.matches_shapes(new_shape):
shapes = [None, None]
# target is the opposite
target_i = 1 - match_i
shapes[match_i] = new_shape
for es in existing_shapes:
shapes[target_i] = es
if matches(self.shapes[target_i], es):
if has_relation(*shapes, self.relation, self.dir):
return False
return True
def format(self, lang_type="standard"):
if lang_type not in ("standard", "simple"):
raise NotImplementedError(f"lang_type = {lang_type}")
(s1, s2), relation, relation_dir = self
if relation == 0:
if relation_dir == 0:
if lang_type == "standard":
rel_txt = "is to the left of"
else:
rel_txt = "left"
else:
if lang_type == "standard":
rel_txt = "is to the right of"
else:
rel_txt = "right"
else:
if relation_dir == 0:
if lang_type == "standard":
rel_txt = "is below"
else:
rel_txt = "below"
else:
if lang_type == "standard":
rel_txt = "is above"
else:
rel_txt = "above"
if s1[0] is None:
s1_0_txt = ""
else:
s1_0_txt = s1[0]
if s1[1] is None:
s1_1_txt = "shape"
else:
s1_1_txt = s1[1]
if s2[0] is None:
s2_0_txt = ""
else:
s2_0_txt = s2[0]
if s2[1] is None:
s2_1_txt = "shape"
else:
s2_1_txt = s2[1]
if lang_type == "standard":
s1_article = config_util.a_or_an(s1_0_txt + s1_1_txt)
s2_article = config_util.a_or_an(s2_0_txt + s2_1_txt)
period_txt = "."
else:
s1_article = ""
s2_article = ""
period_txt = ""
parts = [
s1_article,
s1_0_txt,
s1_1_txt,
rel_txt,
s2_article,
s2_0_txt,
s2_1_txt,
period_txt,
]
return " ".join(s for s in parts if s != "")
def json(self):
return {
"type": "spatial",
"shapes": [{"color": s[0], "shape": s[1]} for s in self.shapes],
"relation": self.relation,
"relation_dir": self.dir,
}
@classmethod
def random(cls):
# 0 -> only shape specified
# 1 -> only color specified
# 2 -> only both specified
shape_1 = spec.ShapeSpec.random()
shape_2 = spec.ShapeSpec.random()
if shape_1 == shape_2:
return cls.random() # Try again
relation = np.random.randint(2)
relation_dir = np.random.randint(2)
return cls((shape_1, shape_2), relation, relation_dir)
def invalidate(self):
# Invalidate by randomly choosing one property to change:
(
(shape_1, shape_2),
relation,
relation_dir,
) = self
properties = []
if shape_1.color is not None:
properties.append(SpatialInvalidateProps.SHAPE_1_COLOR)
if shape_1.shape is not None:
properties.append(SpatialInvalidateProps.SHAPE_1_SHAPE)
if shape_2.color is not None:
properties.append(SpatialInvalidateProps.SHAPE_2_COLOR)
if shape_2.shape is not None:
properties.append(SpatialInvalidateProps.SHAPE_2_SHAPE)
sp = len(properties)
# Invalidate relations half of the time
for _ in range(sp):
properties.append(SpatialInvalidateProps.RELATION_DIR)
# Randomly select property to invalidate
# TODO: Support for invalidating multiple properties
invalid_prop = np.random.choice(properties)
if invalid_prop == SpatialInvalidateProps.SHAPE_1_COLOR:
inv_cfg = (
(
spec.ShapeSpec(color.new(shape_1.color), shape_1.shape),
shape_2,
),
relation,
relation_dir,
)
elif invalid_prop == SpatialInvalidateProps.SHAPE_1_SHAPE:
inv_cfg = (
(
spec.ShapeSpec(shape_1.color, shape.new(shape_1.shape)),
shape_2,
),
relation,
relation_dir,
)
elif invalid_prop == SpatialInvalidateProps.SHAPE_2_COLOR:
inv_cfg = (
(
shape_1,
spec.ShapeSpec(color.new(shape_2.color), shape_2.shape),
),
relation,
relation_dir,
)
elif invalid_prop == SpatialInvalidateProps.SHAPE_2_SHAPE:
inv_cfg = (
(
shape_1,
spec.ShapeSpec(shape_2.color, shape.new(shape_2.shape)),
),
relation,
relation_dir,
)
elif invalid_prop == SpatialInvalidateProps.RELATION_DIR:
inv_cfg = (
(shape_1, shape_2),
relation,
1 - relation_dir,
)
else:
raise RuntimeError
return type(self)(*inv_cfg)
def instantiate(self, label, n_distractors=0, shape_kwargs=None, **kwargs):
"""
Generate a single spatial relation according to the config,
invalidating it if the label is 0.
"""
cfg_attempts = 0
while cfg_attempts < C.MAX_INVALIDATE_ATTEMPTS:
new_cfg = self if label else self.invalidate()
(ss1, ss2), relation, relation_dir = new_cfg
s2 = self.add_shape_from_spec(ss2, relation, relation_dir, shape_kwargs=shape_kwargs)
# Place second shape
attempts = 0
while attempts < C.MAX_PLACEMENT_ATTEMPTS:
s1 = self.add_shape_rel(ss1, s2, relation, relation_dir, shape_kwargs=shape_kwargs)
if not s2.intersects(s1):
break
attempts += 1
else:
raise RuntimeError(
"Could not place shape onto image without intersection"
)
if label: # Positive example
break
elif self.does_not_validate([s1], s2):
break
cfg_attempts += 1
# Place distractor shapes
shapes = [s1, s2]
n_dist = self.sample_n_distractor(n_distractors)
for _ in range(n_dist):
attempts = 0
while attempts < C.MAX_DISTRACTOR_PLACEMENT_ATTEMPTS:
dss = self.sample_distractor(existing_shapes=shapes)
ds = self.add_shape(dss, shape_kwargs=shape_kwargs)
# No intersections
if not any(ds.intersects(s) for s in shapes):
if label:
shapes.append(ds)
break
else:
# If this is a *negative* example, we should not have
# the relation expressed by the original config
if self.does_not_validate(shapes, ds):
shapes.append(ds)
break
attempts += 1
else:
raise RuntimeError(
"Could not place distractor onto " "image without intersection"
)
return new_cfg, shapes
def add_shape_rel(self, spec, oth_shape, relation, relation_dir, shape_kwargs=None):
"""
Add shape, obeying the relation/relation_dir w.r.t. oth shape
"""
if shape_kwargs is None:
shape_kwargs = {}
color_, shape_ = spec
if shape_ is None:
shape_ = shape.random()
if color_ is None:
color_ = color.random()
if relation == 0:
new_y = shape.rand_pos()
if relation_dir == 0:
# Shape must be LEFT of oth shape
new_x = np.random.randint(C.X_MIN, oth_shape.x - C.BUFFER)
else:
# Shape RIGHT of oth shape
new_x = np.random.randint(oth_shape.x + C.BUFFER, C.X_MAX)
else:
new_x = shape.rand_pos()
if relation_dir == 0:
# BELOW (remember y coords reversed)
new_y = np.random.randint(oth_shape.y + C.BUFFER, C.X_MAX)
else:
# ABOVE
new_y = np.random.randint(C.X_MIN, oth_shape.y - C.BUFFER)
return shape.SHAPE_IMPLS[shape_](x=new_x, y=new_y, color_=color_, **shape_kwargs)
class SpatialInvalidateProps(Enum):
SHAPE_1_COLOR = 0
SHAPE_1_SHAPE = 1
SHAPE_2_COLOR = 2
SHAPE_2_SHAPE = 3
RELATION_DIR = 4
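
# --- Illustrative sketch (not part of minishapeworld) ------------------------------
# matches() treats a spec as a (color, shape) pair where None acts as a wildcard,
# and has_relation() decodes relation 0 as left/right and relation 1 as below/above
# (image coordinates, so a larger y means lower on the canvas, as the "y coords
# reversed" note in add_shape_rel points out). The _StubShape class below is a
# hypothetical stand-in that only exists to make those helpers callable in
# isolation; real shapes come from msw.shape and carry more state.
if __name__ == "__main__":

    class _StubShape:
        def __init__(self, color, name, x, y):
            self.color, self.name, self.x, self.y = color, name, x, y

        def left(self, other):
            return self.x < other.x

        def right(self, other):
            return self.x > other.x

        def above(self, other):
            return self.y < other.y

        def below(self, other):
            return self.y > other.y

    red_circle = _StubShape("red", "circle", x=10, y=40)
    blue_square = _StubShape("blue", "square", x=50, y=40)

    print(matches(("red", None), red_circle))            # True: shape part is a wildcard
    print(has_relation(red_circle, blue_square, 0, 0))   # True: relation 0, dir 0 -> "left of"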
| 33.876972
| 99
| 0.521371
|
79504d96196fc00e92ed8f5d2cf47d27e87b96ab
| 63,620
|
py
|
Python
|
tests/unit/gapic/compute_v1/test_firewalls.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/compute_v1/test_firewalls.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/compute_v1/test_firewalls.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.firewalls import FirewallsClient
from google.cloud.compute_v1.services.firewalls import pagers
from google.cloud.compute_v1.services.firewalls import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FirewallsClient._get_default_mtls_endpoint(None) is None
assert FirewallsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
FirewallsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert FirewallsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [FirewallsClient,])
def test_firewalls_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.FirewallsRestTransport, "rest"),]
)
def test_firewalls_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [FirewallsClient,])
def test_firewalls_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_client_get_transport_class():
transport = FirewallsClient.get_transport_class()
available_transports = [
transports.FirewallsRestTransport,
]
assert transport in available_transports
transport = FirewallsClient.get_transport_class("rest")
assert transport == transports.FirewallsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
@mock.patch.object(
FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
def test_firewalls_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(FirewallsClient, transports.FirewallsRestTransport, "rest", "true"),
(FirewallsClient, transports.FirewallsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_firewalls_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
def test_firewalls_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
def test_firewalls_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete(request)
def test_delete_rest_from_dict():
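    # Passing request_type=dict re-runs the happy path with a plain dict request,
    # exercising the client's coercion of the dict into the request message.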
test_delete_rest(request_type=dict)
def test_delete_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", firewall="firewall_value",)
mock_args.update(sample_request)
client.delete(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_delete_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteFirewallRequest(),
project="project_value",
firewall="firewall_value",
)
def test_get_rest(transport: str = "rest", request_type=compute.GetFirewallRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Firewall(
creation_timestamp="creation_timestamp_value",
description="description_value",
destination_ranges=["destination_ranges_value"],
direction=compute.Firewall.Direction.EGRESS,
disabled=True,
id=205,
kind="kind_value",
name="name_value",
network="network_value",
priority=898,
self_link="self_link_value",
source_ranges=["source_ranges_value"],
source_service_accounts=["source_service_accounts_value"],
source_tags=["source_tags_value"],
target_service_accounts=["target_service_accounts_value"],
target_tags=["target_tags_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Firewall.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Firewall)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.destination_ranges == ["destination_ranges_value"]
assert response.direction == compute.Firewall.Direction.EGRESS
assert response.disabled is True
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.network == "network_value"
assert response.priority == 898
assert response.self_link == "self_link_value"
assert response.source_ranges == ["source_ranges_value"]
assert response.source_service_accounts == ["source_service_accounts_value"]
assert response.source_tags == ["source_tags_value"]
assert response.target_service_accounts == ["target_service_accounts_value"]
assert response.target_tags == ["target_tags_value"]
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Firewall()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Firewall.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", firewall="firewall_value",)
mock_args.update(sample_request)
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetFirewallRequest(),
project="project_value",
firewall="firewall_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_rest_bad_request(
transport: str = "rest", request_type=compute.InsertFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert(request)
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.insert(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls"
% client.transport._host,
args[1],
)
def test_insert_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertFirewallRequest(),
project="project_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_list_rest(transport: str = "rest", request_type=compute.ListFirewallsRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.FirewallList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.FirewallList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListFirewallsRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.FirewallList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.FirewallList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListFirewallsRequest(), project="project_value",
)
def test_list_rest_pager():
client = FirewallsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.FirewallList(
items=[compute.Firewall(), compute.Firewall(), compute.Firewall(),],
next_page_token="abc",
),
compute.FirewallList(items=[], next_page_token="def",),
compute.FirewallList(items=[compute.Firewall(),], next_page_token="ghi",),
compute.FirewallList(items=[compute.Firewall(), compute.Firewall(),],),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.FirewallList.to_json(x) for x in response)
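        # Serve each canned page from a separate HTTP response; side_effect yields
        # them in order as the pager requests successive pages.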
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.Firewall) for i in results)
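        # Iterating .pages should expose each raw response's next_page_token,
        # ending with an empty token on the final page.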
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_patch_rest(transport: str = "rest", request_type=compute.PatchFirewallRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_patch_rest_bad_request(
transport: str = "rest", request_type=compute.PatchFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch(request)
def test_patch_rest_from_dict():
test_patch_rest(request_type=dict)
def test_patch_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.patch(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_patch_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch(
compute.PatchFirewallRequest(),
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_update_rest(
transport: str = "rest", request_type=compute.UpdateFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_update_rest_bad_request(
transport: str = "rest", request_type=compute.UpdateFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.update(request)
def test_update_rest_from_dict():
test_update_rest(request_type=dict)
def test_update_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.update(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_update_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update(
compute.UpdateFirewallRequest(),
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FirewallsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.FirewallsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_firewalls_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FirewallsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_firewalls_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FirewallsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
"update",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_firewalls_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_firewalls_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport()
adc.assert_called_once()
def test_firewalls_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FirewallsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_firewalls_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.FirewallsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_firewalls_host_no_port():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_host_with_port():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FirewallsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = FirewallsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = FirewallsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = FirewallsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = FirewallsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = FirewallsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = FirewallsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = FirewallsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FirewallsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = FirewallsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
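    # A custom ClientInfo supplied at construction time should be forwarded to the
    # transport's _prep_wrapped_messages, whether the transport is created by the
    # client or instantiated directly.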
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FirewallsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
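    # Entering and exiting the client as a context manager should close the
    # underlying transport session exactly once.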
for transport, close_name in transports.items():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()