content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
# Demo script: segment a Thai string into Enhanced Thai Character Clusters
# (ETCC) using pythainlp; the expected segmentation is shown in the comment.
from pythainlp.tokenize import etcc
print(etcc.etcc('คืนความสุข')) # /คืน/ความสุข |
# Definition for singly-linked list.
class ListNode(object):
    """Node in a singly-linked list."""

    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # next node, or None at the tail

    def __eq__(self, other):
        """Structural equality: same values, in the same order, same length.

        Implemented iteratively; the original recursed once per node via
        ``self.next == other.next``, which raises RecursionError for lists
        longer than Python's recursion limit.
        """
        a, b = self, other
        while isinstance(a, ListNode) and isinstance(b, ListNode):
            if a.val != b.val:
                return False
            a, b = a.next, b.next
        # Both chains must end together; a non-ListNode `other` (or a
        # length mismatch) falls out here as False, matching the original.
        return a is None and b is None
|
# Generated by Django 2.2 on 2019-04-25 08:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Todo table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=200)),
                ('taskdate', models.DateTimeField()),
                ('complete', models.BooleanField(default=False)),
                ('expire', models.BooleanField(default=False)),
                ('started', models.BooleanField(default=False)),
                # created/updated are maintained automatically by Django.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name_plural': 'Todo',
                'ordering': ('taskdate',),
            },
        ),
    ]
|
# Entry point: all work is delegated to the project's judicator module.
from logic import judicator
judicator.activate()
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Clyde McQueen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Launch orb_slam2_ros simulation test environment."""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Build the launch description for the orb_slam2_ros simulation test.

    Starts gzserver (optionally gzclient and rviz), publishes the AUV urdf,
    and runs the orb_slam2 stereo node plus a localizer shim.
    """
    # Use a simpler urdf file: no forward camera, no barometer, no thrust, no drag
    # Does contain a motion plugin, so the AUV will be pushed around in a repeating pattern
    orca_description_dir = get_package_share_directory('orca_description')
    urdf_file = os.path.join(orca_description_dir, 'urdf', 'slam_test.urdf')

    # No fiducial markers
    orca_gazebo_dir = get_package_share_directory('orca_gazebo')
    world_file = os.path.join(orca_gazebo_dir, 'worlds', 'empty.world')

    # ORB features vocabulary file
    # This works well in simulation, but I'm not sure how it will do in a marine environment
    orb_slam_dir = get_package_share_directory('orb_slam2_ros')
    orb_voc_file = os.path.join(orb_slam_dir, 'orb_slam2', 'Vocabulary', 'ORBvoc.txt')

    # Orb-slam2 params
    orca_bringup_dir = get_package_share_directory('orca_bringup')
    slam_params_file = os.path.join(orca_bringup_dir, 'params', 'slam_test_params.yaml')

    # Rviz config
    rviz_cfg_file = os.path.join(orca_bringup_dir, 'cfg', 'slam_test_launch.rviz')

    return LaunchDescription([
        DeclareLaunchArgument(
            'gzclient',
            default_value='False',
            description='Launch Gazebo UI?'),
        DeclareLaunchArgument(
            'rviz',
            default_value='True',
            description='Launch rviz?'),
        # Launch gzserver
        ExecuteProcess(
            cmd=['gzserver',
                 '-s', 'libgazebo_ros_init.so',  # Publish /clock
                 '-s', 'libgazebo_ros_factory.so',  # Injection endpoint
                 world_file],
            output='screen'),
        # Launch gzclient
        ExecuteProcess(
            cmd=['gzclient'],
            output='screen',
            condition=IfCondition(LaunchConfiguration('gzclient'))),
        # Launch rviz
        ExecuteProcess(
            cmd=['rviz2', '-d', rviz_cfg_file],
            output='screen',
            condition=IfCondition(LaunchConfiguration('rviz'))),
        # Replacement for base_controller: odom->base_link is static
        # NOTE(review): ROS distro install path is hard-coded to Foxy here.
        ExecuteProcess(
            cmd=['/opt/ros/foxy/lib/tf2_ros/static_transform_publisher',
                 '0', '0', '0', '0', '0', '0', 'odom', 'base_link',
                 '--ros-args', '-p', 'use_sim_time:=true'],
            output='screen'),
        # Inject the urdf file
        Node(
            package='sim_fiducial',
            executable='inject_entity.py',
            output='screen',
            arguments=[urdf_file, '0', '0', '0', '0', '0', '0'],
            parameters=[{'use_sim_time': True}]),
        # Publish static transforms from the urdf
        Node(
            package='robot_state_publisher',
            executable='robot_state_publisher',
            output='screen',
            name='robot_state_publisher',
            arguments=[urdf_file],
            parameters=[{'use_sim_time': True}]),
        # Run orb_slam2_ros_stereo
        Node(
            package='orb_slam2_ros',
            executable='orb_slam2_ros_stereo',
            output='screen',
            name='orb_slam2_stereo',
            parameters=[slam_params_file, {
                'voc_file': orb_voc_file,
            }],
            remappings=[
                ('/image_left/image_color_rect', '/stereo/left/image_raw'),
                ('/image_right/image_color_rect', '/stereo/right/image_raw'),
                ('/camera/camera_info', '/stereo/left/camera_info'),
            ]),
        # Run orb_slam2_localizer, a shim that publishes tf map->odom
        Node(
            package='orca_localize',
            executable='orb_slam2_localizer',
            output='screen',
            name='orb_slam2_localizer',
            parameters=[slam_params_file],
            remappings=[
                ('/camera_pose', '/orb_slam2_stereo_node/pose'),
            ]),
    ])
|
import datetime
from jcudc24ingesterapi.ingester_exceptions import UnknownParameterError
from jcudc24ingesterapi import typed, APIDomainObject, format_timestamp
from jcudc24ingesterapi.models.locations import LocationOffset
__author__ = 'Casey Bajema'
class DataEntry(APIDomainObject):
    """
    Base class for individual data points of a dataset.
    DataEntry objects will be used for each data point where the actual data is passed in through the
    kwargs argument.
    The kwargs parameters must conform to the data_type schema in the dataset or an exception will be thrown on initialisation.
    The (dataset, id) tuple uniquely identifies the data entry.
    """
    __xmlrpc_class__ = "data_entry"
    id = typed("_id", int, "An identifier for the data entry")
    dataset = typed("_dataset", int, "The dataset ID")
    timestamp = typed("_timestamp", datetime.datetime, "The timestamp for the entry")
    location_offset = typed("_location_offset", LocationOffset, "Offset from the locations frame of reference")
    data = typed("_data", dict, "Data storage")

    def __init__(self, dataset=None, timestamp=None, id=None, **kwargs):
        self.id = id
        self.dataset = dataset
        self.timestamp = timestamp
        self.data = {}
        # Push the kwargs to fields
        # for key in data_type_schema.keys():
        #     self[key] = kwargs.pop(key, None)
        # Any remaining keyword argument is unexpected and is rejected.
        # BUG FIX: the original iterated `kwargs` directly, which yields only
        # the key strings and failed with a ValueError on unpacking instead
        # of raising the intended UnknownParameterError.
        for key, value in kwargs.items():
            raise UnknownParameterError(key, value)

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, item, value):
        self.data[item] = value

    def __delitem__(self, item):
        del self.data[item]

    def __str__(self):
        ret = "Time: %s Dataset: %s\n"%(format_timestamp(self.timestamp), self.dataset)
        for k in self.data:
            ret += "\t%s = %s\n"%(k, self.data[k])
        return ret
class FileObject(object):
    """This object references a file on disk that is to be downloaded.

    Exactly one of f_path (a path on disk) or f_handle (an open handle) is
    expected to be set; __str__ reports whichever is present.
    """
    __xmlrpc_class__ = "file_object"
    f_path = typed("_f_path", str, "Internal filename")
    mime_type = typed("_mime_type", str, "The mime type of the file")
    file_name = typed("_file_name", str, "The original file name")

    def __init__(self, f_path=None, f_handle=None, mime_type=None, file_name=None):
        self.f_handle = f_handle
        self.f_path = f_path
        self.mime_type = mime_type
        self.file_name = file_name

    def __str__(self):
        ret = "FileObject("
        # Idiom fix: compare to None with `is not` rather than `!=`.
        ret += "f_path: %s"%self.f_path if self.f_path is not None else "f_handle: %s"%self.f_handle
        ret += ", mime_type: %s)"%self.mime_type
        return ret
|
from setuptools import setup
from setuptools import find_packages
# Package metadata for DDrop ("Mathematical Dropconnect"), a Keras add-on.
setup(name='DDrop',
      version='0.0.7',
      description='Mathematical Dropconnect',
      author='Derek Khu, Andrea Santilli',
      author_email='derek.khu@gmail.com',
      license='MIT',
      install_requires=['keras'],
      packages=find_packages())
|
# Module-level constant; its purpose is not evident from this file alone.
attribute = 42
|
# Generated by Django 2.0.2 on 2018-03-12 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds AlertChannel.set_active (nullable timestamp)."""

    dependencies = [
        ('vigil', '0015_auto_20180228_1503'),
    ]

    operations = [
        migrations.AddField(
            model_name='alertchannel',
            name='set_active',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
import lemoncheesecake.api as lcc
@lcc.suite("ProposedTransactions")
@lcc.tags("database_api_second_pipeline")
class ProposedTransactions:
    """Empty lemoncheesecake suite placeholder for proposed-transaction tests."""
    pass
|
"""Combine code coverage files."""
from __future__ import annotations
import os
import json
import typing as t
from ...target import (
walk_compile_targets,
walk_powershell_targets,
)
from ...io import (
read_text_file,
)
from ...util import (
ANSIBLE_TEST_TOOLS_ROOT,
display,
ApplicationError,
)
from ...util_common import (
ResultType,
run_command,
write_json_file,
write_json_test_results,
)
from ...executor import (
Delegate,
)
from ...data import (
data_context,
)
from ...host_configs import (
DockerConfig,
RemoteConfig,
)
from ...provisioning import (
HostState,
prepare_profiles,
)
from . import (
enumerate_python_arcs,
enumerate_powershell_lines,
get_collection_path_regexes,
get_all_coverage_files,
get_python_coverage_files,
get_python_modules,
get_powershell_coverage_files,
initialize_coverage,
COVERAGE_OUTPUT_FILE_NAME,
COVERAGE_GROUPS,
CoverageConfig,
PathChecker,
)
def command_coverage_combine(args):  # type: (CoverageCombineConfig) -> None
    """Patch paths in coverage files and merge into a single file."""
    # Profiles are prepared first so delegation (if any) has host state.
    host_state = prepare_profiles(args)  # coverage combine
    combine_coverage_files(args, host_state)
def combine_coverage_files(args, host_state):  # type: (CoverageCombineConfig, HostState) -> t.List[str]
    """Combine coverage and return a list of the resulting files."""
    if args.delegate:
        # When delegating to a container/remote host, previously exported
        # combined files must be shipped along in the payload.
        if isinstance(args.controller, (DockerConfig, RemoteConfig)):
            paths = get_all_coverage_files()
            # Exported files are named `...=coverage.combined[...]`.
            exported_paths = [path for path in paths if os.path.basename(path).split('=')[-1].split('.')[:2] == ['coverage', 'combined']]
            if not exported_paths:
                raise ExportedCoverageDataNotFound()
            pairs = [(path, os.path.relpath(path, data_context().content.root)) for path in exported_paths]

            def coverage_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
                """Add the coverage files to the payload file list."""
                display.info('Including %d exported coverage file(s) in payload.' % len(pairs), verbosity=1)
                files.extend(pairs)

            data_context().register_payload_callback(coverage_callback)

        raise Delegate(host_state=host_state)

    paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args, host_state)

    for path in paths:
        display.info('Generated combined output: %s' % path, verbosity=1)

    return paths
class ExportedCoverageDataNotFound(ApplicationError):
    """Exception when no combined coverage data is present yet is required."""
    def __init__(self):
        # The message tells the user exactly how to produce the missing export.
        super().__init__(
            'Coverage data must be exported before processing with the `--docker` or `--remote` option.\n'
            'Export coverage with `ansible-test coverage combine` using the `--export` option.\n'
            'The exported files must be in the directory: %s/' % ResultType.COVERAGE.relative_path)
def _command_coverage_combine_python(args, host_state):  # type: (CoverageCombineConfig, HostState) -> t.List[str]
    """Combine Python coverage files and return a list of the output files."""
    coverage = initialize_coverage(args, host_state)
    modules = get_python_modules()
    coverage_files = get_python_coverage_files()
    counter = 0
    sources = _get_coverage_targets(args, walk_compile_targets)
    # Stub groups give every target file an (empty) arc set so --stub runs
    # still report untouched files.
    groups = _build_stub_groups(args, sources, lambda s: dict((name, set()) for name in s))
    collection_search_re, collection_sub_re = get_collection_path_regexes()

    # Accumulate arcs per group per file across all input coverage files.
    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
        group = get_coverage_group(args, coverage_file)
        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue
        for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
            if args.export:
                filename = os.path.relpath(filename)  # exported paths must be relative since absolute paths may differ between systems
            if group not in groups:
                groups[group] = {}
            arc_data = groups[group]
            if filename not in arc_data:
                arc_data[filename] = set()
            arc_data[filename].update(arcs)

    output_files = []

    if args.export:
        coverage_file = os.path.join(args.export, '')
        suffix = '=coverage.combined'
    else:
        coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
        suffix = ''

    path_checker = PathChecker(args, collection_search_re)

    # Write one combined coverage data file per group.
    for group in sorted(groups):
        arc_data = groups[group]
        updated = coverage.CoverageData()
        for filename in arc_data:
            if not path_checker.check_path(filename):
                continue
            updated.add_arcs({filename: list(arc_data[filename])})
        if args.all:
            # Ensure every known source appears, even with no recorded arcs.
            updated.add_arcs(dict((source[0], []) for source in sources))
        if not args.explain:
            output_file = coverage_file + group + suffix
            updated.write_file(output_file)  # always write files to make sure stale files do not exist
            if updated:
                # only report files which are non-empty to prevent coverage from reporting errors
                output_files.append(output_file)

    path_checker.report()

    return sorted(output_files)
def _command_coverage_combine_powershell(args):  # type: (CoverageCombineConfig) -> t.List[str]
    """Combine PowerShell coverage files and return a list of the output files."""
    coverage_files = get_powershell_coverage_files()

    def _default_stub_value(source_paths):
        """Return {path: {line: 0}} zero-hit maps for the given source files."""
        cmd = ['pwsh', os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'coverage_stub.ps1')]
        cmd.extend(source_paths)
        stubs = json.loads(run_command(args, cmd, capture=True, always=True)[0])
        return dict((d['Path'], dict((line, 0) for line in d['Lines'])) for d in stubs)

    counter = 0
    sources = _get_coverage_targets(args, walk_powershell_targets)
    groups = _build_stub_groups(args, sources, _default_stub_value)
    collection_search_re, collection_sub_re = get_collection_path_regexes()

    # Accumulate per-line hit counts per group per file.
    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
        group = get_coverage_group(args, coverage_file)
        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue
        for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
            if args.export:
                filename = os.path.relpath(filename)  # exported paths must be relative since absolute paths may differ between systems
            if group not in groups:
                groups[group] = {}
            coverage_data = groups[group]
            if filename not in coverage_data:
                coverage_data[filename] = {}
            file_coverage = coverage_data[filename]
            for line_no, hit_count in hits.items():
                file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count

    output_files = []
    path_checker = PathChecker(args)

    for group in sorted(groups):
        coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
        if args.all:
            # Add 0 line entries for files not in coverage_data.
            # BUG FIX: the original called _default_stub_value(source_line_count),
            # passing an int where the helper extends a command line with paths
            # (cmd.extend), raising TypeError. Batch the missing files into a
            # single helper invocation instead.
            missing_sources = [source for source, source_line_count in sources if source not in coverage_data]
            if missing_sources:
                coverage_data.update(_default_stub_value(missing_sources))
        if not args.explain:
            if args.export:
                output_file = os.path.join(args.export, group + '=coverage.combined')
                write_json_file(output_file, coverage_data, formatted=False)
                output_files.append(output_file)
                continue
            output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
            write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
            output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))

    path_checker.report()

    return sorted(output_files)
def _get_coverage_targets(args, walk_func): # type: (CoverageCombineConfig, t.Callable) -> t.List[t.Tuple[str, int]]
"""Return a list of files to cover and the number of lines in each file, using the given function as the source of the files."""
sources = []
if args.all or args.stub:
# excludes symlinks of regular files to avoid reporting on the same file multiple times
# in the future it would be nice to merge any coverage for symlinks into the real files
for target in walk_func(include_symlinks=False):
target_path = os.path.abspath(target.path)
target_lines = len(read_text_file(target_path).splitlines())
sources.append((target_path, target_lines))
sources.sort()
return sources
def _build_stub_groups(args, sources, default_stub_value):
"""
:type args: CoverageCombineConfig
:type sources: List[tuple[str, int]]
:type default_stub_value: Func[List[str]]
:rtype: dict
"""
groups = {}
if args.stub:
stub_group = []
stub_groups = [stub_group]
stub_line_limit = 500000
stub_line_count = 0
for source, source_line_count in sources:
stub_group.append(source)
stub_line_count += source_line_count
if stub_line_count > stub_line_limit:
stub_line_count = 0
stub_group = []
stub_groups.append(stub_group)
for stub_index, stub_group in enumerate(stub_groups):
if not stub_group:
continue
groups['=stub-%02d' % (stub_index + 1)] = default_stub_value(stub_group)
return groups
def get_coverage_group(args, coverage_file):  # type: (CoverageCombineConfig, str) -> t.Optional[str]
    """Return the name of the coverage group for the specified coverage file, or None if no group was found."""
    # Coverage file names look like: command=target=environment=version=coverage.*
    parts = os.path.basename(coverage_file).split('=', 4)

    if len(parts) != 5 or not parts[4].startswith('coverage.'):
        return None

    names = {
        'command': parts[0],
        'target': parts[1],
        'environment': parts[2],
        'version': parts[3],
    }

    # Exported files keep only the version; other parts become 'various'.
    export_names = {
        'version': parts[3],
    }

    pieces = []
    for part in COVERAGE_GROUPS:
        if part in args.group_by:
            pieces.append('=%s' % names[part])
        elif args.export:
            pieces.append('=%s' % export_names.get(part, 'various'))

    group = ''.join(pieces)
    if args.export:
        group = group.lstrip('=')

    return group
class CoverageCombineConfig(CoverageConfig):
    """Configuration for the coverage combine command."""
    def __init__(self, args):  # type: (t.Any) -> None
        super().__init__(args)

        # Coverage-file name parts to group results by (empty = no grouping).
        self.group_by = frozenset(args.group_by or ())  # type: t.FrozenSet[str]
        self.all = args.all  # type: bool
        self.stub = args.stub  # type: bool

        # only available to coverage combine
        self.export = args.export if 'export' in args else False  # type: str
|
import configparser # python 3.x
import yaml
# BASIC_SCRIPT = 'configs/basic_config.yaml'
# RUNNING_SCRIPT = 'configs/train_deform.yaml'
# with open(BASIC_SCRIPT,'r') as f:
# bconfig = yaml.load(f)
# with open(RUNNING_SCRIPT,'r') as f:
# sconfig = yaml.load(f)
### UVBW Training
# Stage toggles for the training/testing pipeline. Which scripts consume
# these flags is not visible from this file -- confirm against the callers.
UVBW_TEST = True
UVBW_TRAIN = False
DEFORM_TRAIN = True
DEFORM_CHECK = True
DEFORM_TEST = False
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
from releasetool.commands.tag.dotnet import RELEASE_LINE_PATTERN, kokoro_job_name
# (line, expected package, expected version) triples that must match the
# release-line pattern.
release_triggering_lines = [
    ("Release Google.LongRunning version 1.2.3", "Google.LongRunning", "1.2.3"),
    (
        "Release Google.LongRunning version 1.2.3-beta01",
        "Google.LongRunning",
        "1.2.3-beta01",
    ),
    ("- Release Google.LongRunning version 1.2.3", "Google.LongRunning", "1.2.3"),
]
# Lines that must NOT trigger a release. Note these entries are plain
# strings -- the surrounding parentheses are grouping, not tuples.
non_release_triggering_lines = [
    ("Release new version of all OsLogin packages"),
    ("Release all OsLogin packages version 1.2.3"),
    ("Release Google.LongRunning version 1.0"),
    ("Release Google.LongRunning version 1.2.3 and 1.2.4"),
]
@pytest.mark.parametrize("line,package,version", release_triggering_lines)
def test_release_line_regex_matching(line, package, version):
    """The regex extracts a well-formatted package and version."""
    match = re.search(RELEASE_LINE_PATTERN, line)
    assert match is not None
    assert (match.group(1), match.group(2)) == (package, version)
@pytest.mark.parametrize("line", non_release_triggering_lines)
def test_release_line_regex_not_matching(line):
    """The regex is strict enough not to match other lines."""
    assert re.search(RELEASE_LINE_PATTERN, line) is None
def test_kokoro_job_name():
    """No Kokoro job name is produced for this owner/repo and package."""
    assert kokoro_job_name("upstream-owner/upstream-repo", "some-package-name") is None
|
"""
try except
"""
try:
a = 1 / 2
except:
print("Something went wrong")
try:
a = 1 / 0
except:
print("Something went wrong")
else: # Executes if no exceptions are raised
print("Everithings Ok.")
|
from django.urls import path
from .views import (
home,
personList,
newPerson,
updatePerson,
vehicleList,
newVehicle,
updateVehicle,
rotaryMotion,
newRotaryMotion,
updateRotaryMotion,
monthly,
newMonthly,
updateMonthly,
rotaryMonthly,
newRotaryMonthly,
updateRotaryMonthly,
)
# URL routes for the core app; every view name below is imported from .views.
urlpatterns = [
    path('', home, name='core_home'),
    # Person URLs (note: the update route reuses the 'person_new/' prefix)
    path('person_list/', personList, name='core_persons_list'),
    path('person_new/', newPerson, name='core_persons_new'),
    path('person_new/<id>/', updatePerson, name='core_persons_update'),
    # Vehicle URLs
    path('vehicle_list/', vehicleList, name='core_vehicles_list'),
    path('vehicle_new/', newVehicle, name='core_vehicles_new'),
    path('vehicle_update/<id>/', updateVehicle, name='core_vehicle_update'),
    # Rotary Motion URLs
    path('rotaryMotion_list/', rotaryMotion, name='core_rotaryMotion_list'),
    path('rotaryMotion_new/', newRotaryMotion, name='core_rotaryMotion_new'),
    path('rotaryMotion_update/<id>/', updateRotaryMotion, name='core_rotaryMotion_update'),
    # Monthly URLs
    path('monthly_list/', monthly, name='core_monthly_list'),
    path('monthly_new/', newMonthly, name='core_monthly_new'),
    path('monthly_update/<id>/', updateMonthly, name='core_monthly_update'),
    # Rotary Monthly URLs
    path('rotaryMonthly_list/', rotaryMonthly, name='core_rotaryMonthly_list'),
    path('rotaryMonthly_new/', newRotaryMonthly, name='core_rotaryMonthly_new'),
    path('rotaryMonthly_update/<id>/', updateRotaryMonthly, name='core_rotaryMonthly_update'),
]
|
from django.db import models
class CityCountyStateCode(models.Model):
    """Reference table of city/county/state feature codes.

    Backed by the ref_city_county_state_code table; see the module-level
    comment below this class regarding the unique index on the natural key
    columns (feature_id, state_alpha, county_sequence, county_numeric).
    """
    feature_id = models.IntegerField()
    feature_name = models.TextField(blank=True, null=True)
    feature_class = models.TextField(blank=True, null=True)
    census_code = models.TextField(blank=True, null=True)
    census_class_code = models.TextField(blank=True, null=True)
    gsa_code = models.TextField(blank=True, null=True)
    opm_code = models.TextField(blank=True, null=True)
    state_numeric = models.TextField(db_index=True, blank=True, null=True)
    state_alpha = models.TextField()
    county_sequence = models.IntegerField(blank=True, null=True)
    county_numeric = models.TextField(blank=True, null=True)
    county_name = models.TextField(blank=True, null=True)
    primary_latitude = models.DecimalField(max_digits=13, decimal_places=8)
    primary_longitude = models.DecimalField(max_digits=13, decimal_places=8)
    date_created = models.DateField(blank=True, null=True)
    date_edited = models.DateField(blank=True, null=True)

    class Meta:
        db_table = "ref_city_county_state_code"
# Not shown here is a unique index on feature_id, state_alpha, county_sequence, county_numeric (see
# migration file for exact syntax). Because Postgres does not enforce uniqueness on nullable columns
# and because county_sequence and county_numeric are nullable, we have to perform some trickery to
# enforce uniqueness on the natural key columns. Note that, as far as I could tell, the source of
# these data do not provide actual, sanctioned natural keys. These were obtained through observation
# of the data; the only time features were repeated was when they spanned counties or states.
|
#
# @lc app=leetcode id=1299 lang=python3
#
# [1299] Replace Elements with Greatest Element on Right Side
#
import unittest
from typing import List
# @lc code=start
class Solution:
    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace each element with the greatest element to its right; the last becomes -1."""
        result: List[int] = [-1] * len(arr)
        best = arr[-1]  # running maximum of everything to the right
        for idx in range(len(arr) - 2, -1, -1):
            result[idx] = best
            if arr[idx] > best:
                best = arr[idx]
        return result
# @lc code=end
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.replaceElements."""

    def test_example(self) -> None:
        self.assertEqual(
            Solution().replaceElements([17, 18, 5, 4, 6, 1]),
            [18, 6, 6, 6, 1, -1],
        )

    def test_single_element(self) -> None:
        self.assertEqual(Solution().replaceElements([10]), [-1])


if __name__ == "__main__":
    unittest.main()
|
import turtle

# 0,0,0 is black in the default 0..1 color mode, so this must run before
# colormode(255) below if the values are to stay valid.
turtle.bgcolor(0,0,0) # set the background of the window to black
# use RGB color to control the turtle pen color
turtle.colormode(255) # values for red, green, blue range from 0-255
R = 165  # (165, 42, 42) is the RGB triple for brown
G = 42
B = 42
turtle.pencolor(R,G,B)
# Draw a 100x100 square: four forward moves with 90-degree right turns.
for i in range(4):
    turtle.forward(100)
    turtle.right(90)
|
import json
import yaml
from collections import OrderedDict
def from_json(json_str):
    """Reads a JSON string into an OrderedDict.

    Key order from the document is preserved via object_pairs_hook.

    :param json_str: a JSON string
    :returns: an OrderedDict of the JSON contents
    """
    return json.loads(json_str, object_pairs_hook=OrderedDict)
def from_yaml(yaml_str):
    """Reads in a string of YAML into an OrderedDict.

    :param yaml_str: the contents of a YAML file
    :returns: an OrderedDict of the YAML contents
    """
    def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
        # Subclass the requested loader so the constructor registered below
        # does not leak into the shared loader class.
        class OrderedLoader(Loader):
            pass

        def construct_mapping(loader, node):
            loader.flatten_mapping(node)
            return object_pairs_hook(loader.construct_pairs(node))

        # Route every YAML mapping through construct_mapping so key order
        # is preserved in the resulting OrderedDict.
        OrderedLoader.add_constructor(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
            construct_mapping)
        return yaml.load(stream, OrderedLoader)

    # SafeLoader prevents construction of arbitrary Python objects from input.
    argsdict = ordered_load(yaml_str, yaml.SafeLoader)
    return argsdict
|
# Assignment base class
#
# @author: Kyle Benson
# (c) Kyle Benson 2012
from AgraderWorkflow import AgraderWorkflow
class Assignment(AgraderWorkflow):
    '''Represents a single assignment (or submassignment) in the view of a single submission,
    being possibly composed of different subassignments.
    Supports several categories of callbacks (each of which accepts the assignment as an argument):
    setup: runs at the beginning of a assignment
    cleanup: runs after grading all the subassignments
    grade: run when gathering grades for a submission
    '''

    def __mergeGrades(self, grades):
        # NOTE(review): self.grades is initialized to a plain dict in __init__,
        # which has no merge() method; this either relies on grades being
        # replaced with a richer object elsewhere, or is dead code -- confirm.
        return self.grades.merge(grades)

    def __init__(self):
        self.assignments = {}  # name -> subassignment
        self.grades = {}       # grade storage (see __mergeGrades note)

    def addAssignment(self, assignment, priority=None):
        # NOTE(review): the priority argument is accepted but never used.
        self.assignments[assignment.name] = assignment
        self.addCallback('subassignments', assignment)

    def getAssignments(self):
        # itervalues() exists only on Python 2 dicts; this module appears to
        # target Python 2 (see the 2012 header).
        return self.assignments.itervalues()

    def getAssignment(self, key):
        return self.assignments[key]

    def __call__(self, parent=None):
        '''Calls setup, runs itself and then each subassignment (by calling it),
        calls any grade callbacks, then calls cleanup.'''
        self.runCallbacks('setup', self)
        self.runCallbacks('run', self)
        self.runCallbacks('subassignments', self)
        self.runCallbacks('grade', self)
        self.runCallbacks('cleanup', self)
|
import numpy as np
from time import time
from qiskit.circuit import QuantumCircuit, ParameterVector
from qiskit.circuit.library import RealAmplitudes
from surfer.qfi import StochasticApproximation, LinearCombination, ReverseQFI
# Benchmark ReverseQFI on a 5-qubit RealAmplitudes ansatz (reps=15), with all
# parameters set to 1. The commented sections below are alternative circuits
# and reference/approximate QFI implementations kept for comparison runs.
circuit = RealAmplitudes(5, reps=15)
values = np.ones(circuit.num_parameters)
print(circuit.num_parameters)
# x = ParameterVector('x', 2)
# circuit = QuantumCircuit(1)
# circuit.ry(x[0], 0)
# circuit.ry(x[1], 0)
# values = np.array([0.5, 0])
# reference implementation
# lc = LinearCombination()
# start = time()
# reference = lc.compute(circuit, values)
# print(f"Reference: {time() - start}")
# print(reference)
# stochastic approximation
# sa = StochasticApproximation(samples=1000, perturbation=0.1)
# approximated = sa.compute(circuit, values)
# print('Approximated:')
# print(approximated)
rev = ReverseQFI()
start = time()
revd = rev.compute(circuit, values)
# Wall-clock time for the reverse-mode QFI computation.
print(f"Reverse mode: {time() - start}")
# print(np.mean(np.abs(revd - reference)))
|
#!/opt/manager/env/bin/python
import os
from datetime import datetime
from manager_rest import constants
from manager_rest.storage import models
from manager_rest.storage.models_base import db
from manager_rest.flask_utils import setup_flask_app
def create_system_filters():
    """Create the built-in (system) deployment filters if they do not exist.

    Inserts up to three global DeploymentsFilter rows (environments,
    services, k8s) owned by the bootstrap admin in the default tenant,
    then commits once.
    """
    with setup_flask_app().app_context():
        current_deployment_filters = db.session.query(models.DeploymentsFilter)
        curr_dep_filters_ids = {dep_filter.id for dep_filter
                                in current_deployment_filters}
        creator = models.User.query.get(constants.BOOTSTRAP_ADMIN_ID)
        tenant = models.Tenant.query.get(constants.DEFAULT_TENANT_ID)
        now = datetime.utcnow()
        if 'csys-environment-filter' not in curr_dep_filters_ids:
            # Environments: labelled csys-obj-type=environment with no parent.
            env_filter = {
                'id': 'csys-environment-filter',
                'value': [
                    {
                        'key': 'csys-obj-type',
                        'values': ['environment'],
                        'operator': 'any_of',
                        'type': 'label'
                    },
                    {
                        'key': 'csys-obj-parent',
                        'values': [],
                        'operator': 'is_null',
                        'type': 'label'
                    }
                ]
            }
            _add_deployments_filter(env_filter, creator, tenant, now)
        if 'csys-service-filter' not in curr_dep_filters_ids:
            # Services: anything NOT labelled as an environment.
            service_filter = {
                'id': 'csys-service-filter',
                'value': [
                    {
                        'key': 'csys-obj-type',
                        'values': ['environment'],
                        'operator': 'is_not',
                        'type': 'label'
                    }
                ]
            }
            _add_deployments_filter(service_filter, creator, tenant, now)
        if 'csys-k8s-filter' not in curr_dep_filters_ids:
            # Kubernetes: labelled obj-type=k8s.
            service_filter = {
                'id': 'csys-k8s-filter',
                'value': [
                    {
                        'key': 'obj-type',
                        'values': ['k8s'],
                        'operator': 'any_of',
                        'type': 'label',
                    }
                ]
            }
            _add_deployments_filter(service_filter, creator, tenant, now)
        db.session.commit()
def _add_deployments_filter(sys_filter_dict, creator, tenant, now):
    """Fill in the common system-filter fields and stage a DeploymentsFilter row."""
    sys_filter_dict.update(
        created_at=now,
        updated_at=now,
        visibility='global',
        is_system_filter=True,
        creator=creator,
        tenant=tenant,
    )
    db.session.add(models.DeploymentsFilter(**sys_filter_dict))
def _add_blueprints_filter(sys_filter_dict, creator, tenant, now):
    """Fill in the common system-filter fields and stage a BlueprintsFilter row."""
    sys_filter_dict.update(
        created_at=now,
        updated_at=now,
        visibility='global',
        is_system_filter=True,
        creator=creator,
        tenant=tenant,
    )
    db.session.add(models.BlueprintsFilter(**sys_filter_dict))
if __name__ == '__main__':
    # Default to the standard manager REST config path when not explicitly set.
    if 'MANAGER_REST_CONFIG_PATH' not in os.environ:
        os.environ['MANAGER_REST_CONFIG_PATH'] = \
            "/opt/manager/cloudify-rest.conf"
    create_system_filters()
|
"""
@author: gcanco
Takes line sketch (input.png from input folder) and outputs painterly rendering (output.png into output folder)
"""
from msa.predictor import Predictor
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import cv2
import numpy as np
#read input
def run_pix2pix():
    """Read input/input.png, run the pix2pix predictor, write output/output.png."""
    img_in = cv2.imread("input/input.png", 1)  # 1 = load as 3-channel BGR
    img_out = np.empty([])  # NOTE(review): immediately overwritten below; could be removed
    #convert input to black and white 256x256px
    width = 256
    height = 256
    dim = (width, height)
    (thresh, bwimg_in) = cv2.threshold(img_in, 127, 255, cv2.THRESH_BINARY)
    finimg_in = cv2.resize(bwimg_in, dim, interpolation = cv2.INTER_AREA)
    #run model
    predictor = Predictor(json_path = './models/gart_canny_256.json')
    img_predicted = predictor.predict(finimg_in)[0]
    img_out = img_predicted
    # Scale to 0-255 (assumes the predictor emits floats in [0, 1] -- confirm)
    # and convert RGB -> BGR, which is what cv2.imwrite expects.
    cv2.imwrite("output/output.png", cv2.cvtColor(img_out.astype('float32')*255, cv2.COLOR_RGB2BGR))
    predictor = None  # drop the model reference so its resources can be reclaimed
|
from __future__ import unicode_literals
import json
from django.db import models
import requests
# Create your models here.
class Recommender(models.Model):
    """A named, remote book-recommendation service endpoint."""
    url = models.URLField()
    name = models.CharField(max_length=120, default='my-recommender', unique=True)

    def __str__(self):
        return '%s - %s' % (self.id, self.url)

    def similar_books(self, book_id):
        """Fetch and decode the remote service's response for *book_id*."""
        endpoint = self.url + str(book_id)
        content = requests.get(endpoint).content
        print(endpoint)
        return json.loads(content)
|
from itertools import product
from numbers import Number
import numpy as np
import pandas as pd
from skimage.feature import blob_log
from starfish.constants import Indices
from starfish.image import ImageStack
from starfish.munge import dataframe_to_multiindex
from starfish.intensity_table import IntensityTable
from starfish.util.argparse import FsExistsType
from ._base import SpotFinderAlgorithmBase
class GaussianSpotDetector(SpotFinderAlgorithmBase):
    """Laplacian-of-Gaussian (LoG) spot finder.

    Spots are located once on a reference image (the max projection of
    `blobs_stack` over hybridization rounds and channels), then their
    intensities are measured in every (channel, hyb round) slice.
    """

    def __init__(
            self, min_sigma, max_sigma, num_sigma, threshold,
            blobs_stack, overlap=0.5, measurement_type='max', is_volume: bool=True, **kwargs
    ) -> None:
        """Multi-dimensional gaussian spot detector
        Parameters
        ----------
        min_sigma : float
            The minimum standard deviation for Gaussian Kernel. Keep this low to
            detect smaller blobs.
        max_sigma : float
            The maximum standard deviation for Gaussian Kernel. Keep this high to
            detect larger blobs.
        num_sigma : int
            The number of intermediate values of standard deviations to consider
            between `min_sigma` and `max_sigma`.
        threshold : float
            The absolute lower bound for scale space maxima. Local maxima smaller
            than thresh are ignored. Reduce this to detect blobs with less
            intensities.
        overlap : float [0, 1]
            If two spots have more than this fraction of overlap, the spots are combined (default = 0.5)
        blobs_stack : Union[ImageStack, str]
            ImageStack or the path or URL that references the ImageStack that contains the blobs.
        measurement_type : str ['max', 'mean']
            name of the function used to calculate the intensity for each identified spot area
        Notes
        -----
        This spot detector is very sensitive to the threshold that is selected, and the threshold is defined as an
        absolute value -- therefore it must be adjusted depending on the datatype of the passed image.
        """
        self.min_sigma = min_sigma
        self.max_sigma = max_sigma
        self.num_sigma = num_sigma
        self.threshold = threshold
        self.overlap = overlap
        self.is_volume = is_volume
        # Accept either an already-loaded ImageStack or something to load one from.
        if isinstance(blobs_stack, ImageStack):
            self.blobs_stack = blobs_stack
        else:
            self.blobs_stack = ImageStack.from_path_or_url(blobs_stack)
        # Reference image on which spot locations are detected: max projection
        # over hybridization rounds and channels.
        self.blobs_image: np.ndarray = self.blobs_stack.max_proj(Indices.HYB, Indices.CH)
        # Resolve 'max'/'mean'/... to the corresponding numpy reduce function.
        try:
            self.measurement_function = getattr(np, measurement_type)
        except AttributeError:
            raise ValueError(
                f'measurement_type must be a numpy reduce function such as "max" or "mean". {measurement_type} '
                f'not found.')

    @staticmethod
    def _measure_blob_intensity(image, blobs, measurement_function) -> pd.Series:
        """Reduce the intensity inside each blob's bounding box.

        Parameters
        ----------
        image : np.ndarray
            image (indexed z, y, x) to measure intensities in.
        blobs : pd.DataFrame
            one row per blob with integer `{z,y,x}_{min,max}` bound columns.
        measurement_function : Callable
            numpy reduce function (e.g. np.max) applied to each sub-volume.

        Returns
        -------
        pd.Series :
            one measured intensity per blob.
        """
        def fn(row: pd.Series) -> Number:
            # Bounds arrive as a numeric row; cast so they can be used as indices.
            row = row.astype(int)
            result = measurement_function(
                image[
                    row['z_min']:row['z_max'],
                    row['y_min']:row['y_max'],
                    row['x_min']:row['x_max']
                ]
            )
            return result
        return blobs.apply(
            fn,
            axis=1
        )

    def _measure_spot_intensities(
            self, stack: ImageStack, spot_attributes: pd.DataFrame
    ) -> IntensityTable:
        """Measure each spot's intensity in every (channel, hyb) slice of *stack*."""
        n_ch = stack.shape[Indices.CH]
        n_hyb = stack.shape[Indices.HYB]
        spot_attribute_index = dataframe_to_multiindex(spot_attributes)
        intensity_table = IntensityTable.empty_intensity_table(spot_attribute_index, n_ch, n_hyb)
        # Fill the (spot, channel, hyb) tensor one 2-d slice at a time.
        indices = product(range(n_ch), range(n_hyb))
        for c, h in indices:
            image, _ = stack.get_slice({Indices.CH: c, Indices.HYB: h})
            blob_intensities: pd.Series = self._measure_blob_intensity(
                image, spot_attributes, self.measurement_function)
            intensity_table[:, c, h] = blob_intensities
        return intensity_table

    def _find_spot_locations(self) -> pd.DataFrame:
        """Run LoG blob detection on the reference image.

        Returns
        -------
        pd.DataFrame :
            one row per detected spot with columns z/y/x, radius `r`,
            `{z,y,x}_{min,max}` bounds, `intensity` and `spot_id`.

        Raises
        ------
        ValueError :
            if no spots are detected with the configured parameters.
        """
        fitted_blobs_array: np.ndarray = blob_log(
            self.blobs_image, self.min_sigma, self.max_sigma, self.num_sigma, self.threshold,
            self.overlap)
        if fitted_blobs_array.shape[0] == 0:
            raise ValueError('No spots detected with provided parameters')
        fitted_blobs = pd.DataFrame(data=fitted_blobs_array, columns=['z', 'y', 'x', 'r'])
        # convert standard deviation of gaussian kernel used to identify spot to radius of spot
        fitted_blobs['r'] = np.round(fitted_blobs['r'] * np.sqrt(3))
        # convert the array to int so it can be used to index
        fitted_blobs = fitted_blobs.astype(int)
        # clip each blob's bounding box to the image bounds
        for v, max_size in zip(['z', 'y', 'x'], self.blobs_image.shape):
            fitted_blobs[f'{v}_min'] = np.clip(fitted_blobs[v] - fitted_blobs['r'], 0, None)
            fitted_blobs[f'{v}_max'] = np.clip(fitted_blobs[v] + fitted_blobs['r'], None, max_size)
        fitted_blobs['intensity'] = self._measure_blob_intensity(
            self.blobs_image, fitted_blobs, self.measurement_function)
        fitted_blobs['spot_id'] = np.arange(fitted_blobs.shape[0])
        return fitted_blobs

    def find(self, hybridization_image: ImageStack) -> IntensityTable:
        """find spots
        Parameters
        ----------
        hybridization_image : ImageStack
            stack containing spots to find
        Returns
        -------
        IntensityTable :
            3d tensor containing the intensity of spots across channels and hybridization rounds
        """
        spot_attributes = self._find_spot_locations()
        intensity_table = self._measure_spot_intensities(hybridization_image, spot_attributes)
        return intensity_table

    @classmethod
    def add_arguments(cls, group_parser):
        """Register this detector's command-line arguments on *group_parser*."""
        group_parser.add_argument("--blobs-stack", type=FsExistsType(), required=True)
        group_parser.add_argument(
            "--min-sigma", default=4, type=int, help="Minimum spot size (in standard deviation)")
        group_parser.add_argument(
            "--max-sigma", default=6, type=int, help="Maximum spot size (in standard deviation)")
        group_parser.add_argument("--num-sigma", default=20, type=int, help="Number of sigmas to try")
        group_parser.add_argument("--threshold", default=.01, type=float, help="Dots threshold")
        group_parser.add_argument(
            "--overlap", default=0.5, type=float, help="dots with overlap of greater than this fraction are combined")
        group_parser.add_argument(
            "--show", default=False, action='store_true', help="display results visually")
|
# -*- coding: utf-8 -*-
"""mcpyrate-enabled `code.InteractiveConsole`.
Special commands:
- `obj?` shows obj's docstring, `obj??` shows its source code.
- `macros?` shows macro bindings.
- `macro(f)` binds a function as a macro. Works also as a decorator.
"""
# Based on `imacropy.console.MacroConsole` by Juha Jeronen,
# which was based on `macropy.core.MacroConsole` by Li Haoyi,
# Justin Holmgren, Alberto Berti and all the other contributors,
# 2013-2019. Used under the MIT license.
# https://github.com/azazel75/macropy
# https://github.com/Technologicat/imacropy
__all__ = ["MacroConsole"]
import ast
import code
import textwrap
from .. import __version__ as mcpyrate_version
from ..core import MacroExpansionError
from ..debug import format_bindings
from ..expander import find_macros, MacroExpander, global_postprocess
from .utils import get_makemacro_sourcecode
# Boot up `mcpyrate` so that the REPL can import modules that use macros.
# Despite the meta-levels, there's just one global importer for the Python process.
from .. import activate # noqa: F401
class MacroConsole(code.InteractiveConsole):
    """Interactive console that macro-expands each complete input before
    compiling and running it."""

    def __init__(self, locals=None, filename="<interactive input>"):
        """Parameters like in `code.InteractiveConsole`."""
        self.expander = MacroExpander(bindings={}, filename=filename)
        # Set when new macro bindings are imported; consumed by
        # `_refresh_macro_functions` after each successful input.
        self._macro_bindings_changed = False
        if locals is None:
            locals = {}
        # Lucky that both meta-levels speak the same language, eh?
        locals['__macro_expander__'] = self.expander
        super().__init__(locals, filename)
        # Support for special REPL commands.
        self._internal_execute(get_makemacro_sourcecode())
        self._internal_execute("import mcpyrate.repl.utils")

    def _internal_execute(self, source):
        """Execute given source in the console session.
        This is support magic for internal operation of the console
        session itself, e.g. for auto-loading macro functions.
        The source must be pure Python, i.e. no macros.
        The source is NOT added to the session history.
        This bypasses `runsource`, so it too can use this function.
        """
        source = textwrap.dedent(source)
        tree = ast.parse(source)
        tree = ast.Interactive(tree.body)
        code = compile(tree, "<console internal>", "single", self.compile.compiler.flags, 1)
        self.runcode(code)

    def interact(self, banner=None, exitmsg=None):
        """See `code.InteractiveConsole.interact`.
        The only thing we customize here is that if `banner is None`, in which case
        `code.InteractiveConsole` will print its default banner, we print help for
        our special commands and a line containing the `mcpyrate` version before that
        default banner.
        """
        if banner is None:
            self.write(f"mcpyrate {mcpyrate_version} -- Advanced macro expander for Python.\n")
            self.write("- obj? to view obj's docstring, and obj?? to view its source code.\n")
            self.write("- macros? to view macro bindings.\n")
            self.write("- macro(f) to bind function f as a macro. Works also as a decorator.\n")
        return super().interact(banner, exitmsg)

    def runsource(self, source, filename="<interactive input>", symbol="single"):
        """Handle one REPL input: special commands, macro expansion, execution.

        Returns True when the input is incomplete (more lines expected),
        False when it was executed or rejected as erroneous.
        """
        # Special REPL commands.
        if source == "macros?":
            self.write(format_bindings(self.expander))
            return False  # complete input
        elif source.endswith("??"):
            return self.runsource(f'mcpyrate.repl.utils.sourcecode({source[:-2]})')
        elif source.endswith("?"):
            return self.runsource(f"mcpyrate.repl.utils.doc({source[:-1]})")
        # Let the stdlib compiler decide whether the input is complete yet.
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            code = ""
        if code is None:  # incomplete input
            return True
        try:
            # TODO: If we want to support dialects in the REPL, this is where to do it.
            tree = ast.parse(source)
            bindings = find_macros(tree, filename=self.expander.filename, reload=True)  # macro-imports (this will import the modules)
            if bindings:
                self._macro_bindings_changed = True
                self.expander.bindings.update(bindings)
            tree = self.expander.visit(tree)
            tree = global_postprocess(tree)
            tree = ast.Interactive(tree.body)
            code = compile(tree, filename, symbol, self.compile.compiler.flags, 1)
        except (OverflowError, SyntaxError, ValueError, MacroExpansionError):
            self.showsyntaxerror(filename)
            return False  # erroneous input
        except ModuleNotFoundError as err:  # during macro module lookup
            # In this case, the standard stack trace is long and points only to our code and the stdlib,
            # not the erroneous input that's the actual culprit. Better ignore it, and emulate showsyntaxerror.
            # TODO: support sys.excepthook.
            self.write(f"{err.__class__.__name__}: {str(err)}\n")
            return False  # erroneous input
        except ImportError as err:  # during macro lookup in a successfully imported module
            self.write(f"{err.__class__.__name__}: {str(err)}\n")
            return False  # erroneous input
        self.runcode(code)
        self._refresh_macro_functions()
        return False  # Successfully compiled. `runcode` takes care of any runtime failures.

    def _refresh_macro_functions(self):
        """Refresh macro function imports.
        Called after successfully compiling and running an input, so that
        `some_macro.__doc__` points to the right docstring.
        """
        if not self._macro_bindings_changed:
            return
        self._macro_bindings_changed = False
        for asname, function in self.expander.bindings.items():
            if not function.__module__:  # Macros defined in the REPL have `__module__=None`.
                continue
            try:
                source = f"from {function.__module__} import {function.__qualname__} as {asname}"
                self._internal_execute(source)
            except (ModuleNotFoundError, ImportError):
                pass
|
"""
test for the module `handle_charges`
derived from MolVS's tests: https://github.com/mcs07/MolVS/blob/master/tests/test_charge.py
"""
import pytest
import sys
from rdkit import Chem
from opencadd.compounds.standardization import handle_charges
def _uncharge_smiles(smiles):
    """Parse *smiles*, neutralize its charges, and return the canonical
    isomeric SMILES (None when neutralization yields nothing)."""
    uncharged = handle_charges.uncharge(Chem.MolFromSmiles(smiles))
    return Chem.MolToSmiles(uncharged, isomericSmiles=True) if uncharged else None
def test_neutralization():
    """Ionized acid and base sites are all neutralized."""
    observed = _uncharge_smiles("C(C(=O)[O-])(Cc1n[n-]nn1)(C[NH3+])(C[N+](=O)[O-])")
    assert observed == "NCC(Cc1nn[nH]n1)(C[N+](=O)[O-])C(=O)O"
def test_zwitterion():
    """A zwitterion keeps both of its opposing charges."""
    observed = _uncharge_smiles("n(C)1cc[n+]2cccc([O-])c12")
    assert observed == "Cn1cc[n+]2cccc([O-])c12"
def test_choline():
    """Choline should be left with a positive charge."""
    observed = _uncharge_smiles("C[N+](C)(C)CCO")
    assert observed == "C[N+](C)(C)CCO"
def test_hydrogen():
    """The extra proton is removed, giving deanol as the charge parent."""
    observed = _uncharge_smiles("C[NH+](C)CCO")
    assert observed == "CN(C)CCO"
def test_neutrality():
    """An already-neutral salt pair is left untouched."""
    observed = _uncharge_smiles("[Na+].O=C([O-])c1ccccc1")
    assert observed == "O=C([O-])c1ccccc1.[Na+]"
def test_benzoate():
    """Benzoate ion is protonated to benzoic acid."""
    observed = _uncharge_smiles("O=C([O-])c1ccccc1")
    assert observed == "O=C(O)c1ccccc1"
def test_histidine():
    """Both charges in histidine are neutralized."""
    observed = _uncharge_smiles("[NH3+]C(Cc1cnc[nH]1)C(=O)[O-]")
    assert observed == "NC(Cc1cnc[nH]1)C(=O)O"
def test_fragment_neutralization():
    """Both fragments of the salt are neutralized."""
    observed = _uncharge_smiles("C[NH+](C)(C).[Cl-]")
    assert observed == "CN(C)C.Cl"
def test_oxigen_neutralisation():
    """Exactly one oxygen of the nitrate-like anion is neutralised."""
    observed = _uncharge_smiles("[N+](=O)([O-])[O-]")
    assert observed == "O=[N+]([O-])[O-]"
def test_prefer_organic_fragments():
    """The smaller organic fragment wins over the larger inorganic one."""
    observed = _uncharge_smiles("[N+](=O)([O-])[O-].[CH2]")
    assert observed == "O=[N+]([O-])[O-].[CH2]"
def test_oxygen_balancing():
    """One oxygen is protonated; the other stays to balance the nitrogen."""
    observed = _uncharge_smiles("C[N+](C)(C)CC([O-])C[O-]")
    assert observed == "C[N+](C)(C)CC([O-])CO"
def test_strongest_acid():
    """The strongest acid site is the one left ionized."""
    observed = _uncharge_smiles("[O-]C(=O)C[n+]1ccn2cccc([O-])c12")
    assert observed == "O=C([O-])C[n+]1ccn2cccc(O)c21"
def test_charge_neutralization():
    """Every charge is neutralized when nothing requires balancing."""
    observed = _uncharge_smiles("[NH+](C)(C)CC([O-])C[O-]")
    assert observed == "CN(C)CC(O)CO"
def test_uncharge():
    """Both alkoxide charges are neutralized."""
    observed = _uncharge_smiles("CNCC([O-])C[O-]")
    assert observed == "CNCC(O)CO"
# Tests for Reionize
def _reionize_smiles(smiles):
    """Parse *smiles*, reionize it so the strongest acids carry the
    charges, and return the canonical SMILES (None on failure)."""
    reionized = handle_charges.reionize(Chem.MolFromSmiles(smiles))
    return Chem.MolToSmiles(reionized) if reionized else None
def test_proton_to_weak_acid():
    """The reionizer moves the proton to the weaker acid site."""
    observed = _reionize_smiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
    assert observed == "O=S(O)c1ccc(S(=O)(=O)[O-])cc1"
def test_charged_carbon():
    """A charged carbon is not mistaken for an
    alpha-carbon-hydrogen-keto site."""
    observed = _reionize_smiles("CCOC(=O)C(=O)[CH-]C#N")
    assert observed == "CCOC(=O)C(=O)[CH-]C#N"
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
def test_find_gist_id():
    """A bare gist id and a full gist URL both resolve to the same id."""
    from gistsync.utils import find_gist_id

    expected = '8b5d44890d95c58ab41b0fb85cb05dce'
    assert find_gist_id('8b5d44890d95c58ab41b0fb85cb05dce') == expected
    assert find_gist_id('https://gist.github.com/Cologler/8b5d44890d95c58ab41b0fb85cb05dce') == expected
|
from flask import Flask, request
from pymessenger.bot import Bot
from decouple import config
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Postgres connection string assembled from environment variables
# (read through python-decouple's `config`).
app.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql://{config("DB_USER")}:{config("DB_PASSWORD")}@{config("DB_HOST")}:5432/{config("DB_NAME")}'
db = SQLAlchemy(app)
# migrate = Migrate(app, db)
# Facebook page access token (for sending) and webhook verification token.
ACCESS_TOKEN = config('ACCESS_TOKEN')
VERIFY_TOKEN = config('VERIFY_TOKEN')
bot = Bot(ACCESS_TOKEN)
class User(db.Model):
    """A Messenger user, identified by their Facebook sender id."""
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Page-scoped id Facebook assigns to the message sender.
    facebook_id = db.Column(
        db.String(200),
        unique=True,
        nullable=False
    )
class Message(db.Model):
    """One processed exchange: the user's text and the bot's reply."""
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Foreign key to User.id. NOTE(review): column is named `user`
    # rather than the conventional `user_id` — confirm before renaming.
    user = db.Column(
        db.Integer,
        db.ForeignKey('user.id'),
        nullable=False
    )
    # Original text received from the user.
    message = db.Column(
        db.String(200),
        nullable=False
    )
    # Text the bot answered with.
    response = db.Column(
        db.String(200),
        nullable=False
    )
@app.route('/', methods=['GET', 'POST'])
def receive_message():
    """Messenger webhook: GET answers Facebook's verification handshake,
    POST handles incoming message events."""
    if request.method == 'GET':
        # Facebook sends hub.verify_token + hub.challenge on subscription.
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    else:
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                if message.get('message'):
                    recipient_id = message['sender']['id']
                    message_text = message['message'].get('text')
                    if message_text:
                        # Non-numeric (or overly long) input is simply echoed back.
                        if len(message_text) > 280 or not message_text.isnumeric():
                            send_message(recipient_id, message_text)
                        else:
                            # Look up the sender, creating them on first contact.
                            user = User.query.filter_by(
                                facebook_id=recipient_id
                            )
                            if not user.first():
                                user = User(
                                    facebook_id=recipient_id
                                )
                                db.session.add(user)
                                db.session.commit()
                            else:
                                user = user.first()
                            # Classic FizzBuzz on the numeric message.
                            message_number = int(message_text)
                            response = ''
                            if message_number % 3 == 0:
                                response += 'Fizz'
                            if message_number % 5 == 0:
                                response += 'Buzz'
                            if not response:
                                response = str(message_number)
                            # Persist the exchange, then reply.
                            message = Message(
                                user=user.id,
                                message=message_text,
                                response=response
                            )
                            db.session.add(message)
                            db.session.commit()
                            send_message(recipient_id, response)
    return "done"
def verify_fb_token(token_sent):
    """Answer Facebook's webhook verification handshake.

    Returns the challenge value when *token_sent* matches the configured
    VERIFY_TOKEN, otherwise an error message.
    """
    if token_sent != VERIFY_TOKEN:
        return 'Token de verificação inválido.'
    return request.args.get("hub.challenge", "ok")
def send_message(recipient_id, response):
    """Send *response* as a text message to Facebook user *recipient_id*."""
    bot.send_text_message(recipient_id, response)
    return "done"
# When run directly (not under a WSGI server), create any missing
# database tables and start Flask's development server.
if __name__ == "__main__":
    db.create_all()
    app.run()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for testing dungeon
"""
from mockito import mock
from pyherc.data import Portal, floor_tile, wall_tile, add_portal
from pyherc.data import get_portal
from pyherc.test.builders import LevelBuilder
class TestDungeon:
    """
    Tests for Dungeon class
    """

    def __init__(self):
        """
        Default constructor
        """
        self.floor_rock = 1
        self.wall_empty = None

    def _build_level(self):
        """Create an empty 20x20 level with the default tiles."""
        return (LevelBuilder()
                .with_size((20, 20))
                .with_floor_tile(self.floor_rock)
                .with_wall_tile(self.wall_empty)
                .build())

    def test_simple_level_creation(self):
        """
        Test that simple level creation works
        """
        level = self._build_level()

        assert level is not None
        assert floor_tile(level, (5, 5)) == self.floor_rock
        assert wall_tile(level, (0, 0)) == self.wall_empty

    def test_stair_linking(self):
        """
        Test that stairs can be linked
        """
        level1 = self._build_level()
        level2 = self._build_level()

        stairs1 = Portal((None, None), None)
        stairs1.icon = 'stairs'
        add_portal(level1, (10, 10), stairs1)

        # Adding the second portal with the first as `other_end` links them.
        stairs2 = Portal((None, None), None)
        add_portal(level2, (5, 5), stairs2, stairs1)

        assert stairs1.level == level1
        assert stairs1.location == (10, 10)
        assert stairs1.get_other_end() == stairs2

        assert stairs2.level == level2
        assert stairs2.location == (5, 5)
        assert stairs2.get_other_end() == stairs1

        assert get_portal(level1, (10, 10)) == stairs1
        assert get_portal(level2, (5, 5)) == stairs2
|
from django.apps import AppConfig
class BasicdeviceConfig(AppConfig):
    """Django application configuration for the `card_reader` app."""
    # NOTE(review): class is named "Basicdevice" while the app label is
    # 'card_reader' — presumably left over from a rename; confirm.
    name = 'card_reader'
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 12:35:23 2017
@author: Flame
"""
from enum import Enum
from colorama import Fore
#from colorama import init
#init(autoreset=False)
# Symbol used for the blank cells padding the tape.
# NOTE(review): name kept as-is (used by Tape); "EMTY" looks like a typo.
EMTY_SYMBOL = '*'


def paint(string, pointer):
    """Return *string* with the character at index *pointer* wrapped in
    red ANSI color codes."""
    pieces = []
    for index, symbol in enumerate(string):
        if index == pointer:
            pieces.append(Fore.RED + symbol + Fore.RESET)
        else:
            pieces.append(symbol)
    return "".join(pieces)
class Tape:
    """Turing-machine tape: a string of cells plus a cursor position."""

    def __init__(self, tape):
        # Pad both ends with the empty symbol; the cursor starts on the
        # leftmost non-empty symbol.
        self.tape = EMTY_SYMBOL + tape + EMTY_SYMBOL
        self.pointer = 1

    def __str__(self):
        return paint(self.tape, self.pointer)

    def change_curr_symbol(self, newS):
        """Overwrite the cell under the cursor with *newS*."""
        before = self.tape[:self.pointer]
        after = self.tape[self.pointer + 1:]  # "" when the cursor is at the end
        self.tape = before + newS + after

    def curr_symbol(self):
        """Return the symbol under the cursor."""
        return self.tape[self.pointer]

    def move_to(self, to):
        """Move the cursor one cell in direction *to* (Stay is a no-op)."""
        if to == Move.Right:
            self.pointer += 1
        elif to == Move.Left:
            self.pointer -= 1
class Move(Enum):
    """Direction the tape head moves after applying a rule."""
    Left = 1
    Right = 2
    Stay = 3
class Q:
    """A Turing-machine state, identified by its integer index *n*."""

    def __init__(self, n):
        self.n = n

    def __str__(self):
        return 'q' + str(self.n)

    def __eq__(self, other):
        # Compare by state index; defer to the other operand for foreign
        # types instead of crashing on a missing ``n`` attribute.
        if not isinstance(other, Q):
            return NotImplemented
        return self.n == other.n

    def __hash__(self):
        # Defining __eq__ alone would make Q unhashable; keep states
        # usable as dict keys / set members.
        return hash(self.n)
class Rule:
    """One transition: in state *Q* reading symbol *S*, write *next_S*,
    switch to *next_Q* and move the head as *pos* dictates."""

    def __init__(self, Q, S, next_Q, next_S, pos):
        self.Q = Q
        self.S = S
        self.next_Q = next_Q
        self.next_S = next_S
        self.pos = pos

    def __str__(self):
        return "{0}, {1} -> {2}, {3}, {4}".format(
            self.Q, self.S, self.next_Q, self.next_S, self.pos.name)
class TuringMachine:
    """Runs a rule set from *curr_Q* until *final_Q* is reached."""
    def __init__(self, rules, curr_Q, final_Q):
        self.rules= rules
        self.curr_Q= curr_Q
        self.final_Q= final_Q
    def __str__(self):
        out = ""
        for r in self.rules:
            out = out + str(r) + '\n'
        return out
    def check(self, tape):
        """Run the machine on *tape*, printing each configuration.

        Returns True when the final state is reached, False as soon as no
        rule matches the current (state, symbol) pair.
        NOTE(review): loops forever if the final state is unreachable but
        some rule always matches.
        """
        print(tape, " - ", self.curr_Q )
        while self.curr_Q != self.final_Q:
            for r in self.rules:
                if self.curr_Q == r.Q and tape.curr_symbol() == r.S:
                    # Apply the rule: rewrite the cell, move, switch state.
                    tape.change_curr_symbol(r.next_S)
                    tape.move_to(r.pos)
                    self.curr_Q= r.next_Q
                    print(tape, " - ", self.curr_Q )
                    break
                # Even the last rule failed to match: halt and reject.
                if r == self.rules[len(self.rules)-1]:
                    return False
        return True
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):

  def _run_interest_exploration(self, resample_documents):
    """Build an interest-exploration recsim env and run the shared sim."""
    env_config = {
        'num_candidates': 5,
        'slate_size': 2,
        'resample_documents': resample_documents,
        'seed': 100,
    }
    params = recsim_wrapper.Params(
        recsim_env=interest_exploration.create_environment(env_config))
    env = recsim_wrapper.RecsimWrapper(params)
    test_util.run_test_simulation(env=env, stackelberg=True)

  def test_interest_exploration_can_run(self):
    self._run_interest_exploration(resample_documents=False)

  def test_interest_exploration_can_run_with_resampling(self):
    self._run_interest_exploration(resample_documents=True)
# Allow running this test module directly with the absl test runner.
if __name__ == '__main__':
  absltest.main()
|
"""
This splitter split data for each user separately
"""
from typing import Optional, Union
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame, Window
from replay.constants import AnyDataFrame
from replay.splitters.base_splitter import Splitter, SplitterReturnType
from replay.utils import convert2spark
# pylint: disable=too-few-public-methods
class UserSplitter(Splitter):
    """
    Split data inside each user's history separately.

    Example:

    >>> from replay.session_handler import get_spark_session, State
    >>> spark = get_spark_session(1, 1)
    >>> state = State(spark)

    >>> from replay.splitters import UserSplitter
    >>> import pandas as pd
    >>> data_frame = pd.DataFrame({"user_id": [1,1,1,2,2,2],
    ...    "item_id": [1,2,3,1,2,3],
    ...    "relevance": [1,2,3,4,5,6],
    ...    "timestamp": [1,2,3,3,2,1]})
    >>> data_frame
       user_id  item_id  relevance  timestamp
    0        1        1          1          1
    1        1        2          2          2
    2        1        3          3          3
    3        2        1          4          3
    4        2        2          5          2
    5        2        3          6          1

    By default, test is one last item for each user

    >>> UserSplitter(seed=80083).split(data_frame)[-1].toPandas()
       user_id  item_id  relevance  timestamp
    0        1        3          3          3
    1        2        1          4          3

    Random records can be retrieved with ``shuffle``:

    >>> UserSplitter(shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
       user_id  item_id  relevance  timestamp
    0        1        2          2          2
    1        2        3          6          1

    You can specify the number of items for each user:

    >>> UserSplitter(item_test_size=3, shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
       user_id  item_id  relevance  timestamp
    0        1        2          2          2
    1        1        3          3          3
    2        1        1          1          1
    3        2        3          6          1
    4        2        2          5          2
    5        2        1          4          3

    Or a fraction:

    >>> UserSplitter(item_test_size=0.67, shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
       user_id  item_id  relevance  timestamp
    0        1        2          2          2
    1        1        3          3          3
    2        2        3          6          1
    3        2        2          5          2

    `user_test_size` allows to put exact number of users into test set

    >>> UserSplitter(user_test_size=1, item_test_size=2, seed=42).split(data_frame)[-1].toPandas().user_id.nunique()
    1

    >>> UserSplitter(user_test_size=0.5, item_test_size=2, seed=42).split(data_frame)[-1].toPandas().user_id.nunique()
    1
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        item_test_size: Union[float, int] = 1,
        user_test_size: Optional[Union[float, int]] = None,
        shuffle=False,
        drop_cold_items: bool = False,
        drop_cold_users: bool = False,
        seed: Optional[int] = None,
    ):
        """
        :param item_test_size: fraction or a number of items per user
        :param user_test_size: similar to ``item_test_size``,
            but corresponds to the number of users.
            ``None`` is all available users.
        :param shuffle: take random items and not last based on ``timestamp``.
        :param drop_cold_items: flag to drop cold items from test
        :param drop_cold_users: flag to drop cold users from test
        :param seed: random seed
        """
        super().__init__(
            drop_cold_items=drop_cold_items, drop_cold_users=drop_cold_users
        )
        self.item_test_size = item_test_size
        self.user_test_size = user_test_size
        self.shuffle = shuffle
        self.seed = seed

    def _get_test_users(self, log: DataFrame,) -> DataFrame:
        """
        :param log: input DataFrame
        :return: Spark DataFrame with single column `user_id`
        """
        all_users = log.select("user_id").distinct()
        user_count = all_users.count()
        if self.user_test_size is not None:
            value_error = False
            if isinstance(self.user_test_size, int):
                # An int is an absolute user count; must fit in [1, user_count).
                if 1 <= self.user_test_size < user_count:
                    test_user_count = self.user_test_size
                else:
                    value_error = True
            else:
                # A float is a fraction of all users; must lie in (0, 1).
                if 1 > self.user_test_size > 0:
                    test_user_count = user_count * self.user_test_size
                else:
                    value_error = True
            if value_error:
                raise ValueError(
                    f"""
                Invalid value for user_test_size: {self.user_test_size}
                """
                )
            # Sample users by ranking a random column and keeping the top rows.
            test_users = (
                all_users.withColumn("rand", sf.rand(self.seed))
                .withColumn(
                    "row_num", sf.row_number().over(Window.orderBy("rand"))
                )
                .filter(f"row_num <= {test_user_count}")
                .drop("rand", "row_num")
            )
        else:
            test_users = all_users
        return test_users

    def _split_proportion(self, log: DataFrame) -> SplitterReturnType:
        """
        Proportionate split
        :param log: input DataFrame `[timestamp, user_id, item_id, relevance]`
        :return: train and test DataFrames
        """
        counts = log.groupBy("user_id").count()
        test_users = self._get_test_users(log).withColumn(
            "test_user", sf.lit(1)
        )
        if self.shuffle:
            res = self._add_random_partition(
                log.join(test_users, how="left", on="user_id")
            )
        else:
            res = self._add_time_partition(
                log.join(test_users, how="left", on="user_id")
            )
        # Per-user rank normalized by that user's interaction count.
        res = res.join(counts, on="user_id", how="left")
        res = res.withColumn("frac", sf.col("row_num") / sf.col("count"))
        train = res.filter(
            f"""
            frac > {self.item_test_size} OR
            test_user IS NULL
            """
        ).drop("rand", "row_num", "count", "frac", "test_user")
        test = res.filter(
            f"""
            frac <= {self.item_test_size} AND
            test_user IS NOT NULL
            """
        ).drop("rand", "row_num", "count", "frac", "test_user")
        return train, test

    def _split_quantity(self, log: DataFrame) -> SplitterReturnType:
        """
        Split by quantity
        :param log: input DataFrame `[timestamp, user_id, item_id, relevance]`
        :return: train and test DataFrames
        """
        test_users = self._get_test_users(log).withColumn(
            "test_user", sf.lit(1)
        )
        if self.shuffle:
            res = self._add_random_partition(
                log.join(test_users, how="left", on="user_id")
            )
        else:
            res = self._add_time_partition(
                log.join(test_users, how="left", on="user_id")
            )
        train = res.filter(
            f"""
            row_num > {self.item_test_size} OR
            test_user IS NULL
            """
        ).drop("rand", "row_num", "test_user")
        test = res.filter(
            f"""
            row_num <= {self.item_test_size} AND
            test_user IS NOT NULL
            """
        ).drop("rand", "row_num", "test_user")
        return train, test

    def _core_split(self, log: DataFrame) -> SplitterReturnType:
        # Fractional size -> proportionate split; integer >= 1 -> fixed count.
        if 0 <= self.item_test_size < 1.0:
            train, test = self._split_proportion(log)
        elif self.item_test_size >= 1 and isinstance(self.item_test_size, int):
            train, test = self._split_quantity(log)
        else:
            raise ValueError(
                "`test_size` value must be [0, 1) or "
                "a positive integer; "
                f"test_size={self.item_test_size}"
            )
        return train, test

    def _add_random_partition(self, dataframe: DataFrame) -> DataFrame:
        """
        Adds `rand` column and a user index column `row_num` based on `rand`.
        :param dataframe: input DataFrame with `user_id` column
        :returns: processed DataFrame
        """
        dataframe = dataframe.withColumn("rand", sf.rand(self.seed))
        dataframe = dataframe.withColumn(
            "row_num",
            sf.row_number().over(
                Window.partitionBy("user_id").orderBy("rand")
            ),
        )
        return dataframe

    @staticmethod
    def _add_time_partition(dataframe: DataFrame) -> DataFrame:
        """
        Adds user index `row_num` based on `timestamp`.
        :param dataframe: input DataFrame with `[timestamp, user_id]`
        :returns: processed DataFrame
        """
        res = dataframe.withColumn(
            "row_num",
            sf.row_number().over(
                Window.partitionBy("user_id").orderBy(
                    sf.col("timestamp").desc()
                )
            ),
        )
        return res
def k_folds(
    log: AnyDataFrame,
    n_folds: Optional[int] = 5,
    seed: Optional[int] = None,
    splitter: Optional[str] = "user",
) -> SplitterReturnType:
    """
    Splits log inside each user into folds at random
    :param log: input DataFrame
    :param n_folds: number of folds
    :param seed: random seed
    :param splitter: splitting strategy. Only user variant is available atm.
    :return: yields train and test DataFrames by folds
    """
    if splitter not in {"user"}:
        raise ValueError(f"Wrong splitter parameter: {splitter}")
    # Assign every interaction a random fold id within its user's history.
    folded = (
        convert2spark(log)
        .withColumn("rand", sf.rand(seed))
        .withColumn(
            "fold",
            sf.row_number().over(
                Window.partitionBy("user_id").orderBy("rand")
            )
            % n_folds,
        )
        .drop("rand")
    )
    for fold_id in range(n_folds):
        yield (
            folded.filter(f"fold != {fold_id}").drop("fold"),
            folded.filter(f"fold == {fold_id}").drop("fold"),
        )
|
"""## The Home Page of awesome-panel.org"""
import pathlib
from panel import Column
from awesome_panel.express._pane._panes import Markdown
HOME_PATH = pathlib.Path(__file__).parent / "home.md"


def view() -> Column:
    """The home view of awesome-panel.org"""
    home_markdown = Markdown(path=HOME_PATH)
    return Column(home_markdown, name="Home", sizing_mode="stretch_width")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Lucas Heitzmann Gabrielli.
# This file is part of gdstk, distributed under the terms of the
# Boost Software License - Version 1.0. See the accompanying
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt>
import gdstk
def test_properties():
    """Round-trip get/set/delete of GDSII properties on every object type."""
    samples = (
        gdstk.Polygon([-1 + 0j, -2j, 3 + 0j, 4j]),
        gdstk.FlexPath((0j, 1j), 0.1),
        gdstk.RobustPath(0j, 0.1),
        gdstk.Label("Label", 0j),
        gdstk.Reference("EMPTY"),
    )
    for sample in samples:
        # Unset attributes read as None; deleting one is a chainable no-op.
        assert sample.get_property(12) is None
        assert sample.delete_property(12) is sample
        # Setting one attribute must not affect another.
        sample.set_property(13, "Property text")
        assert sample.get_property(12) is None
        assert sample.get_property(13) == "Property text"
        # Deletion removes the value completely.
        sample.delete_property(13)
        assert sample.get_property(13) is None
        # Re-setting the same attribute overwrites: the last value wins.
        sample.set_property(13, "Second text")
        sample.set_property(13, "Third text")
        assert sample.get_property(13) == "Third text"
|
# -*- coding: utf-8 -*-
import sys
import os
import timeit
import signal
from collections import deque
from operator import methodcaller
timer = timeit.default_timer
class NotMainThreadError(Exception):
    '''pyinstrument must be used on the main thread in signal mode'''

    def __init__(self, message=''):
        # When no explicit message is supplied, fall back to the class
        # docstring so the error is self-describing.
        detail = message or NotMainThreadError.__doc__
        super(NotMainThreadError, self).__init__(detail)
class SignalUnavailableError(Exception):
    '''pyinstrument uses signal.SIGALRM in signal mode, which is not available on your system.
    You can pass the argument 'use_signal=False' to run in setprofile mode.'''

    def __init__(self, message=''):
        # Default to the class docstring so the exception explains the
        # workaround even when raised without arguments.
        detail = message or SignalUnavailableError.__doc__
        super(SignalUnavailableError, self).__init__(detail)
class Profiler(object):
    """Statistical (sampling) profiler.

    Two sampling back ends:

    * signal mode (``use_signal=True``, default): a ``SIGALRM`` interval
      timer fires every ``self.interval`` seconds and the handler records
      the interrupted call stack.  Requires the main thread and a platform
      that has ``SIGALRM``.
    * setprofile mode (``use_signal=False``): hooks ``sys.setprofile`` and
      records a sample whenever at least ``self.interval`` seconds have
      elapsed since the previous sample.

    Samples are accumulated in ``self.stack_self_time``, a dict mapping a
    root-first tuple of frame identifiers to the total self-time observed
    for that exact stack.
    """
    def __init__(self, use_signal=True):
        if use_signal:
            try:
                # probe for availability; SIGALRM is POSIX-only (absent on Windows)
                signal.SIGALRM
            except AttributeError:
                raise SignalUnavailableError()
        # sampling period in seconds
        self.interval = 0.001
        # timestamp of the most recent sample (timeit.default_timer units)
        self.last_profile_time = 0
        # {tuple(frame identifiers, root first): accumulated self time}
        self.stack_self_time = {}
        self.use_signal = use_signal
    def start(self):
        """Install the sampling hook and begin profiling.

        :raises NotMainThreadError: in signal mode when called off the
            main thread (``signal.signal`` only works on the main thread).
        """
        self.last_profile_time = timer()
        if self.use_signal:
            try:
                signal.signal(signal.SIGALRM, self._signal)
                # the following tells the system to restart interrupted system calls if they are
                # interrupted before any data has been transferred. This avoids many of the problems
                # related to signals interrupting system calls, see issue #16
                signal.siginterrupt(signal.SIGALRM, False)
            except ValueError:
                raise NotMainThreadError()
            # one-shot timer; the handler re-arms it after each sample
            signal.setitimer(signal.ITIMER_REAL, self.interval, 0.0)
        else:
            sys.setprofile(self._profile)
    def stop(self):
        """Cancel the timer / profile hook and stop collecting samples.

        :raises NotMainThreadError: in signal mode when called off the
            main thread.
        """
        if self.use_signal:
            # disarm the timer before swapping the handler out
            signal.setitimer(signal.ITIMER_REAL, 0.0, 0.0)
            try:
                signal.signal(signal.SIGALRM, signal.SIG_IGN)
            except ValueError:
                raise NotMainThreadError()
        else:
            sys.setprofile(None)
    def _signal(self, signum, frame):
        # SIGALRM handler: attribute the elapsed wall time to the
        # interrupted stack, then re-arm the one-shot timer.
        now = timer()
        time_since_last_signal = now - self.last_profile_time
        self._record(frame, time_since_last_signal)
        signal.setitimer(signal.ITIMER_REAL, self.interval, 0.0)
        self.last_profile_time = now
    def _profile(self, frame, event, arg):
        # sys.setprofile callback: take a sample only once a full
        # interval has elapsed since the last one.
        now = timer()
        time_since_last_signal = now - self.last_profile_time
        if time_since_last_signal < self.interval:
            return
        if event == 'call':
            # on 'call' the new frame hasn't executed yet, so charge the
            # elapsed time to the caller instead
            frame = frame.f_back
        self._record(frame, time_since_last_signal)
        self.last_profile_time = now
    def _record(self, frame, time):
        # Accumulate `time` seconds of self-time against the sampled stack.
        stack = self._call_stack_for_frame(frame)
        self.stack_self_time[stack] = self.stack_self_time.get(stack, 0) + time
    def _call_stack_for_frame(self, frame):
        # Walk the f_back chain to build a hashable, root-first tuple of
        # frame identifiers for use as a dict key.
        result_list = deque()
        while frame is not None:
            result_list.appendleft(self._identifier_for_frame(frame))
            frame = frame.f_back
        return tuple(result_list)
    def _identifier_for_frame(self, frame):
        # we use a string here as a tuple hashes slower and this is used as a key in a dictionary
        return '%s\x00%s\x00%i' % (
            frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno
        )
    def root_frame(self):
        """
        Returns the parsed results in the form of a tree of Frame objects
        """
        if not hasattr(self, '_root_frame'):
            self._root_frame = Frame()
            # define a recursive function that builds the hierarchy of frames given the
            # stack of frame identifiers
            def frame_for_stack(stack):
                if len(stack) == 0:
                    return self._root_frame
                parent = frame_for_stack(stack[:-1])
                frame_name = stack[-1]
                if frame_name not in parent.children_dict:
                    parent.add_child(Frame(frame_name, parent))
                return parent.children_dict[frame_name]
            for stack, self_time in self.stack_self_time.items():
                frame_for_stack(stack).self_time = self_time
        return self._root_frame
    def first_interesting_frame(self):
        """
        Traverse down the frame hierarchy until a frame is found with more than one child
        """
        frame = self.root_frame()
        while len(frame.children) <= 1:
            if frame.children:
                frame = frame.children[0]
            else:
                # there are no branches
                return self.root_frame()
        return frame
    def starting_frame(self, root=False):
        # Entry point for reports: either the true root or the first
        # frame where the call tree actually branches.
        return self.root_frame() if root else self.first_interesting_frame()
    def output_text(self, root=False, unicode=False, color=False):
        """Render the sampled call tree as an indented plain-text report."""
        return self.starting_frame(root=root).as_text(unicode=unicode, color=color)
    def output_html(self, root=False):
        """Render the sampled call tree as a standalone interactive HTML page.

        Inlines the CSS/JS assets from the package's ``resources/``
        directory so the returned string is self-contained.
        """
        resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources/')
        with open(os.path.join(resources_dir, 'style.css')) as f:
            css = f.read()
        with open(os.path.join(resources_dir, 'profile.js')) as f:
            js = f.read()
        with open(os.path.join(resources_dir, 'jquery-1.11.0.min.js')) as f:
            jquery_js = f.read()
        body = self.starting_frame(root).as_html()
        return '''
            <html>
            <head>
                <style>{css}</style>
                <script>{jquery_js}</script>
            </head>
            <body>
                {body}
                <script>{js}</script>
            </body>
            </html>'''.format(
            css=css, js=js, jquery_js=jquery_js, body=body
        )
class Frame(object):
    """
    Object that represents a stack frame in the parsed tree

    ``identifier`` is the string produced by
    ``Profiler._identifier_for_frame``: ``'function\\x00file_path\\x00line_no'``.
    The synthetic root frame uses the empty identifier, so the parsing
    properties below return None for it.
    """
    def __init__(self, identifier='', parent=None):
        self.identifier = identifier
        self.parent = parent
        # children keyed by identifier; see add_child
        self.children_dict = {}
        # time spent in this frame itself, excluding children (seconds)
        self.self_time = 0
    @property
    def function(self):
        """Function name parsed from the identifier (None for the root frame)."""
        if self.identifier:
            return self.identifier.split('\x00')[0]
    @property
    def file_path(self):
        """Source file path parsed from the identifier (None for the root frame)."""
        if self.identifier:
            return self.identifier.split('\x00')[1]
    @property
    def line_no(self):
        """First line number of the function, parsed from the identifier."""
        if self.identifier:
            return int(self.identifier.split('\x00')[2])
    @property
    def file_path_short(self):
        """ Return the path resolved against the closest entry in sys.path """
        if not hasattr(self, '_file_path_short'):
            if self.file_path:
                result = None
                for path in sys.path:
                    # On Windows, if self.file_path and path are on different drives, relpath
                    # will result in exception, because it cannot compute a relpath in this case.
                    # The root cause is that on Windows, there is no root dir like '/' on Linux.
                    try:
                        candidate = os.path.relpath(self.file_path, path)
                    except ValueError:
                        continue
                    if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))):
                        result = candidate
                self._file_path_short = result
            else:
                self._file_path_short = None
        return self._file_path_short
    @property
    def is_application_code(self):
        # heuristic: paths containing a '<sep>lib<sep>' component are
        # assumed to be library (non-application) code
        if self.identifier:
            return ('%slib%s' % (os.sep, os.sep)) not in self.file_path
    @property
    def code_position_short(self):
        """'path:line' string for display (None for the root frame)."""
        if self.identifier:
            return '%s:%i' % (self.file_path_short, self.line_no)
    # stylistically I'd rather this was a property, but using @property appears to use twice
    # as many stack frames, so I'm forced into using a function since this method is recursive
    # down the call tree.
    def time(self):
        """Total time for this frame: self_time plus all descendants (cached)."""
        if not hasattr(self, '_time'):
            # can't use a sum(<generator>) expression here sadly, because this method
            # recurses down the call tree, and the generator uses an extra stack frame,
            # meaning we hit the stack limit when the profiled code is 500 frames deep.
            self._time = self.self_time
            for child in self.children:
                self._time += child.time()
        return self._time
    @property
    def proportion_of_parent(self):
        """This frame's time as a fraction of its parent's time (1.0 at the root)."""
        if not hasattr(self, '_proportion_of_parent'):
            if self.parent and self.time():
                try:
                    self._proportion_of_parent = self.time() / self.parent.time()
                except ZeroDivisionError:
                    self._proportion_of_parent = float('nan')
            else:
                self._proportion_of_parent = 1.0
        return self._proportion_of_parent
    @property
    def proportion_of_total(self):
        """This frame's time as a fraction of the whole profile (cached)."""
        if not hasattr(self, '_proportion_of_total'):
            if not self.parent:
                self._proportion_of_total = 1.0
            else:
                self._proportion_of_total = self.parent.proportion_of_total * self.proportion_of_parent
        return self._proportion_of_total
    @property
    def children(self):
        return list(self.children_dict.values())
    @property
    def sorted_children(self):
        """Children ordered by total time, slowest first (cached)."""
        if not hasattr(self, '_sorted_children'):
            self._sorted_children = sorted(self.children, key=methodcaller('time'), reverse=True)
        return self._sorted_children
    def add_child(self, child):
        self.children_dict[child.identifier] = child
    def as_text(self, indent=u'', child_indent=u'', unicode=False, color=False):
        """Render this subtree as an indented text report.

        :param indent: prefix for this frame's own line
        :param child_indent: prefix inherited by child lines
        :param unicode: use box-drawing characters for tree branches
        :param color: wrap the time in ANSI color codes scaled by severity
        """
        colors = colors_enabled if color else colors_disabled
        time_str = '{:.3f}'.format(self.time())
        if color:
            time_str = self._ansi_color_for_time() + time_str + colors.end
        result = u'{indent}{time_str} {function} {c.faint}{code_position}{c.end}\n'.format(
            indent=indent,
            time_str=time_str,
            function=self.function,
            code_position=self.code_position_short,
            c=colors)  # reuse the palette chosen above instead of recomputing it
        # hide children contributing less than 1% of the total
        children = [f for f in self.sorted_children if f.proportion_of_total > 0.01]
        if children:
            last_child = children[-1]
            for child in children:
                if child is not last_child:
                    c_indent = child_indent + (u'├─ ' if unicode else '|- ')
                    cc_indent = child_indent + (u'│  ' if unicode else '|  ')
                else:
                    c_indent = child_indent + (u'└─ ' if unicode else '`- ')
                    cc_indent = child_indent + u'   '
                result += child.as_text(indent=c_indent,
                                        child_indent=cc_indent,
                                        unicode=unicode,
                                        color=color)
        return result
    def as_html(self):
        """Render this subtree as nested <div>s for the HTML report."""
        # collapse a frame by default when no single child dominates it
        start_collapsed = all(child.proportion_of_total < 0.1 for child in self.children)
        extra_class = '' + ('collapse ' if start_collapsed else '')
        extra_class += 'no_children ' if not self.children else ''
        extra_class += 'application ' if self.is_application_code else ''
        # BUG FIX: the custom attribute was misspelled 'date-parent-time';
        # HTML custom data attributes must use the 'data-' prefix.
        result = '''<div class="frame {extra_class}" data-time="{time}" data-parent-time="{parent_proportion}">
            <div class="frame-info">
                <span class="time">{time:.3f}s</span>
                <span class="total-percent">{total_proportion:.1%}</span>
                <!--<span class="parent-percent">{parent_proportion:.1%}</span>-->
                <span class="function">{function}</span>
                <span class="code-position">{code_position}</span>
            </div>'''.format(
            time=self.time(),
            function=self.function,
            code_position=self.code_position_short,
            parent_proportion=self.proportion_of_parent,
            total_proportion=self.proportion_of_total,
            extra_class=extra_class)
        result += '<div class="frame-children">'
        # add this filter to prevent the output file getting too large
        children = [f for f in self.sorted_children if f.proportion_of_total > 0.005]
        for child in children:
            result += child.as_html()
        result += '</div></div>'
        return result
    def _ansi_color_for_time(self):
        # color scale by share of total runtime: red > 60%, yellow > 20%,
        # green > 5%, faint bright-green otherwise
        colors = colors_enabled
        if self.proportion_of_total > 0.6:
            return colors.red
        elif self.proportion_of_total > 0.2:
            return colors.yellow
        elif self.proportion_of_total > 0.05:
            return colors.green
        else:
            return colors.bright_green + colors.faint
    def __repr__(self):
        return 'Frame(identifier=%s, time=%f, children=%r)' % (self.identifier, self.time(), self.children)
class colors_enabled:
    # ANSI SGR escape sequences used when colored terminal output is
    # requested (see Frame.as_text / Frame._ansi_color_for_time).
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[33m'
    blue = '\033[34m'
    cyan = '\033[36m'
    bright_green = '\033[92m'
    bold = '\033[1m'
    faint = '\033[2m'
    # reset: clears all attributes set by the codes above
    end = '\033[0m'
class colors_disabled:
    # Null-object counterpart of colors_enabled: every attribute lookup
    # yields an empty string, so format strings work unchanged without color.
    def __getattr__(self, key):
        return ''
# replace the class with a singleton instance so that attribute access
# (which goes through __getattr__) works the same way as on colors_enabled
colors_disabled = colors_disabled()
|
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.10589,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.28586,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.567184,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.28115,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.486851,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.279223,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.04722,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.190947,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.26188,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.107153,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0101919,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.113533,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0753755,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.220686,
'Execution Unit/Register Files/Runtime Dynamic': 0.0855675,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.303352,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.750786,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.57481,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000571847,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000571847,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00049433,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000189314,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00108278,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0027208,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00561671,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0724604,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.60911,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.163419,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.246108,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.05474,
'Instruction Fetch Unit/Runtime Dynamic': 0.490326,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0973889,
'L2/Runtime Dynamic': 0.00762178,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.12065,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.39387,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0932891,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.093289,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.56298,
'Load Store Unit/Runtime Dynamic': 1.94723,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.230035,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.46007,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0816402,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0830985,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.286577,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0268029,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.586216,
'Memory Management Unit/Runtime Dynamic': 0.109901,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.1249,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.373833,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0188749,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.140279,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.532987,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.66288,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.039984,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.234094,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.214165,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0937629,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.151236,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0763389,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.321338,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.074403,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.34669,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0404604,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00393284,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0434803,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0290857,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0839407,
'Execution Unit/Register Files/Runtime Dynamic': 0.0330186,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.101599,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.252449,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.24628,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000288308,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000288308,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000264583,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00010979,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000417819,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00125902,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00228312,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0279609,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.77855,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.066034,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0949676,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.08338,
'Instruction Fetch Unit/Runtime Dynamic': 0.192505,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0376659,
'L2/Runtime Dynamic': 0.00341304,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.32901,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.528657,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0353253,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0353253,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.49582,
'Load Store Unit/Runtime Dynamic': 0.738195,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0871061,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.174212,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0309142,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0314783,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.110584,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0108301,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.319798,
'Memory Management Unit/Runtime Dynamic': 0.0423084,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.8728,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.106433,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00552559,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0460561,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.158015,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.38071,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0417827,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.235506,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.223799,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0961278,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.155051,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0782643,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.329443,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0756295,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.36612,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0422805,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00403203,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0448739,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0298193,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0871544,
'Execution Unit/Register Files/Runtime Dynamic': 0.0338514,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.104985,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.261481,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.26566,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000269823,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000269823,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000247891,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000103005,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000428357,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00121589,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00212699,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0286661,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.82341,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0673702,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0973629,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.13042,
'Instruction Fetch Unit/Runtime Dynamic': 0.196742,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0382766,
'L2/Runtime Dynamic': 0.00320622,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.38326,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.554323,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0370805,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0370804,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.55837,
'Load Store Unit/Runtime Dynamic': 0.774271,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0914341,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.182868,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0324503,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0330234,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.113373,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0110494,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.325226,
'Memory Management Unit/Runtime Dynamic': 0.0440728,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.0079,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.111221,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00569056,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0471958,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.164107,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.44806,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0393435,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.233591,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.210737,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0937014,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.151137,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0762888,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.321127,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0748591,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.34153,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0398127,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00393026,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0432208,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0290667,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0830335,
'Execution Unit/Register Files/Runtime Dynamic': 0.0329969,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.100892,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.251214,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.24431,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000293528,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000293528,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000269197,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000111613,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000417545,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0012738,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00233075,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0279425,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.77739,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0650829,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0949054,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.08216,
'Instruction Fetch Unit/Runtime Dynamic': 0.191535,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0377626,
'L2/Runtime Dynamic': 0.00361763,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.32351,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.526389,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0351473,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0351475,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.48948,
'Load Store Unit/Runtime Dynamic': 0.734872,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0866674,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.173335,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0307585,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0313242,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.110511,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0106741,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.319458,
'Memory Management Unit/Runtime Dynamic': 0.0419983,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.8599,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.104729,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00550208,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0460735,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.156305,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.37263,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 8.244757154258606,
'Runtime Dynamic': 8.244757154258606,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.402882,
'Runtime Dynamic': 0.0327367,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 68.2683,
'Peak Power': 101.381,
'Runtime Dynamic': 12.897,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 67.8655,
'Total Cores/Runtime Dynamic': 12.8643,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.402882,
'Total L3s/Runtime Dynamic': 0.0327367,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} |
import re
def find_patterns(patterns, string):
    """Print the result of matching each regex in *patterns* against *string*.

    Output format: one header line naming the string, then for every
    pattern a blank-prefixed line naming the regex followed by the list
    returned by ``re.findall``.
    """
    print(f"Searching the string: '{string}'.")
    for regex in patterns:
        matches = re.findall(regex, string)
        print(f"\nWith the regex: {regex}")
        print(f"Result: {matches}")
# Demo: run the two sample patterns against one sample string.
string = 'a b ab aa aaa abb aba'
patterns = [
    '[ab]',    # a single 'a' or 'b'
    'a[ab]+',  # 'a' followed by one or more of 'a'/'b'
]
find_patterns(patterns, string)
|
from ds.tree.binary_tree import BinaryTreeNode
from ds.tree.tree_core import Tree
import math
'''
Given a binary tree in which each node element contains a number.
Find the maximum possible sum from one leaf node to another.
'''
def get_max_path_leaves(root: BinaryTreeNode):
    '''
    Returns (first, second) where
    first = Max path from root to any of its leaves.
    second = Max path from one leaf to another in subtree rooted at root.
    So second is the final answer we want.
    '''
    if root is None:
        # Base cases must be -inf, not 0: an absent subtree may never
        # win a max() against a real (possibly negative) path sum.
        return -math.inf, -math.inf
    left, right = root.left, root.right
    if left is None and right is None:
        # A lone leaf: a root-to-leaf path exists, a leaf-to-leaf one does not.
        return root.data, -math.inf
    if left is None or right is None:
        # Exactly one child: extend its root-to-leaf path through this node.
        # This node cannot join two leaves, so the leaf-to-leaf answer of the
        # child subtree passes through unchanged.
        child = left if right is None else right
        down, across = get_max_path_leaves(child)
        return down + root.data, across
    left_down, left_across = get_max_path_leaves(left)
    right_down, right_across = get_max_path_leaves(right)
    # The best leaf-to-leaf path either stays inside one subtree, or bends
    # through this node, joining the best downward paths of both sides.
    best_across = max(left_across, right_across,
                      left_down + right_down + root.data)
    return max(left_down, right_down) + root.data, best_across
def main():
    """Build a small sample tree, print it, and print the leaf-to-leaf answer."""
    root = BinaryTreeNode(1)
    root.left = BinaryTreeNode(2)
    root.right = BinaryTreeNode(3)
    root2 = BinaryTreeNode(100)
    root2.left = root
    print(Tree(root2))
    print(get_max_path_leaves(root2))


# Fix: guard the demo so importing this module no longer runs it.
if __name__ == '__main__':
    main()
|
# Windows layout of the IoT-23 dataset pipeline; each processing stage
# lives in its own numbered folder under a common root.
_IOT23_ROOT = 'E:\\machine-learning\\datasets\\iot23\\'

iot23_scenarios_dir = _IOT23_ROOT + '1_scenarios\\'
iot23_attacks_dir = _IOT23_ROOT + '2_attacks\\'
iot23_data_dir = _IOT23_ROOT + '3_data_v2\\'
iot23_experiments_dir = _IOT23_ROOT + '4_experiments_v2\\'
|
"""
PRIME1 - Prime Generator
Peter wants to generate some prime numbers for his crypto-system. Help him! Your task is to generate all prime numbers
between two given numbers!
Input
The input begins with the number t of test cases in a single line (t<=10). In each of the next t lines there are two
numbers m and n (1 <= m <= n <= 1000000000, n-m<=100000) separated by a space.
Output
For every test case print all prime numbers p such that m <= p <= n, one number per line, test cases separated by an
empty line.
Example
Input:
2
1 10
3 5
Output:
2
3
5
7
3
5
Warning: large Input/Output data, be careful with certain languages (though most should be OK if the algorithm is well designed)
Author : Anirudh Sai Mergu
Website: www.anirudhmergu.com
Question URL: http://www.spoj.com/problems/PRIME1/
"""
from math import sqrt


def sieve_small_primes(limit=32000):
    """Return every prime below *limit* by trial division.

    The default 32000 exceeds sqrt(10**9), so these primes suffice to
    sieve any segment allowed by the problem bounds.
    """
    primes = [2]
    for candidate in range(3, limit, 2):
        cap = sqrt(candidate) + 1
        is_prime = True
        for p in primes:
            if p >= cap:
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
    return primes


def primes_in_range(m, n, small_primes):
    """Return all primes p with m <= p <= n via a segmented sieve."""
    m = max(m, 2)  # 1 is not prime
    if n < m:
        return []
    cap = sqrt(n) + 1
    # Flags sized to the exact segment (fix: was a fixed 100001-slot list).
    is_prime = [True] * (n - m + 1)
    for p in small_primes:
        if p >= cap:
            break
        # First multiple of p inside [m, n] that is not p itself.
        start = p * 2 if p >= m else m + ((p - m % p) % p)
        # Slice assignment blanks the whole arithmetic progression at C
        # speed; it turns out this runs substantially faster than setting
        # the elements individually with a Python loop.
        is_prime[start - m::p] = [False] * len(is_prime[start - m::p])
    return [m + i for i, flag in enumerate(is_prime) if flag]


def main():
    """Read t test cases from stdin, print primes per case, cases blank-separated."""
    small_primes = sieve_small_primes()
    chunks = []
    for case in range(int(input())):
        if case > 0:
            chunks.append("")  # blank line between consecutive test cases
        lo, hi = map(int, input().split(' '))
        chunks.extend(str(p) for p in primes_in_range(lo, hi, small_primes))
    print("\n".join(chunks))


# Fix: the sieve and I/O previously ran at import time, making the module
# untestable; behavior when run as a script is unchanged.
if __name__ == "__main__":
    main()
|
def digit_sum(number):
    """Return the sum of the decimal digits of a non-negative integer.

    Returns 0 for 0 (the loop never runs), matching the original script.
    """
    total = 0  # fix: was named `sum`, shadowing the builtin
    while number > 0:
        total += number % 10
        number = number // 10
    return total


# Fix: guard the stdin I/O so the logic is importable and testable.
if __name__ == "__main__":
    print(digit_sum(int(input())))
import ctypes
from pyparallelproj.config import lib_parallelproj_c, lib_parallelproj_cuda, n_visible_gpus
def calc_chunks(nLORs, n_chunks):
    """Return boundary indices that split an array of length nLORs into
    n_chunks contiguous chunks.

    The result has n_chunks + 1 entries; e.g. splitting 10 items into 3
    chunks returns [0, 4, 7, 10]. Earlier chunks absorb the remainder,
    so chunk sizes differ by at most one.
    """
    base, remainder = divmod(nLORs, n_chunks)
    boundaries = [0]
    for chunk_index in range(n_chunks):
        size = base + 1 if chunk_index < remainder else base
        boundaries.append(boundaries[-1] + size)
    return boundaries
#------------------
def joseph3d_fwd(xstart, xend, img, img_origin, voxsize, img_fwd, nLORs, img_dim,
                 threadsperblock = 64, n_chunks = 1):
    """Joseph 3D forward projection of `img` along the given LORs.

    Results are written into `img_fwd` in place; the backend's status code
    is returned (on GPU, the code of the last processed chunk). xstart/xend
    hold the LOR end points flattened to 3 coordinates per LOR, which is why
    the chunking below slices them with a stride of 3.
    NOTE(review): the dtypes/ctypes signatures the backends expect are not
    visible here -- presumably contiguous float buffers; confirm against
    pyparallelproj.config.
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(img.ravel(), nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_fwd_cuda(xstart[(3*ic[i]):(3*ic[i+1])], xend[(3*ic[i]):(3*ic[i+1])],
                                                         d_img, img_origin, voxsize,
                                                         img_fwd[ic[i]:ic[i+1]], ic[i+1] - ic[i],
                                                         img_dim, threadsperblock)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_fwd(xstart, xend, img, img_origin, voxsize, img_fwd, nLORs, img_dim)
    return ok
#------------------
def joseph3d_back(xstart, xend, back_img, img_origin, voxsize, sino, nLORs, img_dim,
                  threadsperblock = 64, n_chunks = 1):
    """Joseph 3D back projection of `sino` into `back_img` (in place).

    On the GPU path, each device accumulates its chunk of LORs into its own
    copy of the image; the copies are then summed on the first device and
    copied back into `back_img`. Returns the backend's status code (on GPU,
    the code of the last processed chunk).
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_back_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(back_img, nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_back_cuda(xstart[(3*ic[i]):(3*ic[i+1])], xend[(3*ic[i]):(3*ic[i+1])],
                                                          d_back_img, img_origin, voxsize,
                                                          sino[ic[i]:ic[i+1]], ic[i+1] - ic[i],
                                                          img_dim, threadsperblock)
        # sum all device arrays in the first device
        lib_parallelproj_cuda.sum_float_arrays_on_first_device(d_back_img, nvox)
        # copy summed image back from first device
        lib_parallelproj_cuda.get_float_array_from_device(d_back_img, nvox, 0, back_img)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_back_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_back(xstart, xend, back_img, img_origin, voxsize, sino, nLORs, img_dim)
    return ok
#------------------
def joseph3d_fwd_tof_sino(xstart, xend, img, img_origin, voxsize, img_fwd, nLORs, img_dim,
                          tofbin_width, sigma_tof, tofcenter_offset, nsigmas, ntofbins,
                          threadsperblock = 64, n_chunks = 1):
    """Time-of-flight (sinogram mode) Joseph 3D forward projection.

    Like joseph3d_fwd, but each LOR produces `ntofbins` output values, so
    `img_fwd` is sliced with a stride of ntofbins per LOR when chunking.
    sigma_tof / tofcenter_offset are per-LOR TOF kernel parameters.
    Returns the backend's status code (on GPU, from the last chunk).
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(img.ravel(), nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_fwd_tof_sino_cuda(xstart[(3*ic[i]):(3*ic[i+1])],
                                                                  xend[(3*ic[i]):(3*ic[i+1])],
                                                                  d_img, img_origin, voxsize,
                                                                  img_fwd[(ntofbins*ic[i]):(ntofbins*ic[i+1])],
                                                                  ic[i+1] - ic[i], img_dim,
                                                                  tofbin_width, sigma_tof[ic[i]:ic[i+1]],
                                                                  tofcenter_offset[ic[i]:ic[i+1]],
                                                                  nsigmas, ntofbins, threadsperblock)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_fwd_tof_sino(xstart, xend, img, img_origin, voxsize,
                                                      img_fwd, nLORs, img_dim,
                                                      tofbin_width, sigma_tof, tofcenter_offset,
                                                      nsigmas, ntofbins)
    return ok
#------------------
def joseph3d_back_tof_sino(xstart, xend, back_img, img_origin, voxsize, sino, nLORs, img_dim,
                           tofbin_width, sigma_tof, tofcenter_offset, nsigmas, ntofbins,
                           threadsperblock = 64, n_chunks = 1):
    """Time-of-flight (sinogram mode) Joseph 3D back projection.

    Accumulates `sino` (ntofbins values per LOR, hence the ntofbins-strided
    slicing) into `back_img` in place. The GPU path accumulates per device,
    sums the copies on the first device and copies the result back.
    Returns the backend's status code (on GPU, from the last chunk).
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_back_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(back_img, nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_back_tof_sino_cuda(xstart[(3*ic[i]):(3*ic[i+1])],
                                                                   xend[(3*ic[i]):(3*ic[i+1])],
                                                                   d_back_img, img_origin, voxsize,
                                                                   sino[(ntofbins*ic[i]):(ntofbins*ic[i+1])],
                                                                   ic[i+1] - ic[i], img_dim,
                                                                   tofbin_width, sigma_tof[ic[i]:ic[i+1]],
                                                                   tofcenter_offset[ic[i]:ic[i+1]],
                                                                   nsigmas, ntofbins, threadsperblock)
        # sum all device arrays in the first device
        lib_parallelproj_cuda.sum_float_arrays_on_first_device(d_back_img, nvox)
        # copy summed image back from first device
        lib_parallelproj_cuda.get_float_array_from_device(d_back_img, nvox, 0, back_img)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_back_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_back_tof_sino(xstart, xend, back_img, img_origin, voxsize,
                                                       sino, nLORs, img_dim,
                                                       tofbin_width, sigma_tof, tofcenter_offset,
                                                       nsigmas, ntofbins)
    return ok
#------------------
def joseph3d_fwd_tof_lm(xstart, xend, img, img_origin, voxsize, img_fwd, nLORs, img_dim,
                        tofbin_width, sigma_tof, tofcenter_offset, nsigmas, tofbin,
                        threadsperblock = 64, n_chunks = 1):
    """Time-of-flight (listmode) Joseph 3D forward projection.

    Listmode variant: each event carries its own TOF bin in `tofbin`, so
    `img_fwd` holds one value per event (no ntofbins stride). Results are
    written into `img_fwd` in place; the backend's status code is returned
    (on GPU, from the last chunk).
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(img.ravel(), nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_fwd_tof_lm_cuda(xstart[(3*ic[i]):(3*ic[i+1])],
                                                                xend[(3*ic[i]):(3*ic[i+1])],
                                                                d_img, img_origin, voxsize,
                                                                img_fwd[ic[i]:ic[i+1]], ic[i+1] - ic[i], img_dim,
                                                                tofbin_width, sigma_tof[ic[i]:ic[i+1]],
                                                                tofcenter_offset[ic[i]:ic[i+1]],
                                                                nsigmas, tofbin[ic[i]:ic[i+1]], threadsperblock)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_fwd_tof_lm(xstart, xend, img, img_origin, voxsize,
                                                    img_fwd, nLORs, img_dim,
                                                    tofbin_width, sigma_tof, tofcenter_offset,
                                                    nsigmas, tofbin)
    return ok
#------------------
def joseph3d_back_tof_lm(xstart, xend, back_img, img_origin, voxsize, lst, nLORs, img_dim,
                         tofbin_width, sigma_tof, tofcenter_offset, nsigmas, tofbin,
                         threadsperblock = 64, n_chunks = 1):
    """Time-of-flight (listmode) Joseph 3D back projection.

    Accumulates the per-event values `lst` (one per event, with per-event
    TOF bins in `tofbin`) into `back_img` in place. The GPU path accumulates
    per device, sums the copies on the first device and copies the result
    back. Returns the backend's status code (on GPU, from the last chunk).
    """
    if n_visible_gpus > 0:
        nvox = ctypes.c_longlong(img_dim[0]*img_dim[1]*img_dim[2])
        # send image to all devices
        d_back_img = lib_parallelproj_cuda.copy_float_array_to_all_devices(back_img, nvox)
        # split call to GPU lib into chunks (useful for systems with limited memory)
        ic = calc_chunks(nLORs, n_chunks)
        for i in range(n_chunks):
            ok = lib_parallelproj_cuda.joseph3d_back_tof_lm_cuda(xstart[(3*ic[i]):(3*ic[i+1])],
                                                                 xend[(3*ic[i]):(3*ic[i+1])],
                                                                 d_back_img, img_origin, voxsize,
                                                                 lst[ic[i]:ic[i+1]], ic[i+1] - ic[i], img_dim,
                                                                 tofbin_width, sigma_tof[ic[i]:ic[i+1]],
                                                                 tofcenter_offset[ic[i]:ic[i+1]],
                                                                 nsigmas, tofbin[ic[i]:ic[i+1]], threadsperblock)
        # sum all device arrays in the first device
        lib_parallelproj_cuda.sum_float_arrays_on_first_device(d_back_img, nvox)
        # copy summed image back from first device
        lib_parallelproj_cuda.get_float_array_from_device(d_back_img, nvox, 0, back_img)
        # free image device arrays
        lib_parallelproj_cuda.free_float_array_on_all_devices(d_back_img, nvox)
    else:
        ok = lib_parallelproj_c.joseph3d_back_tof_lm(xstart, xend, back_img, img_origin, voxsize,
                                                     lst, nLORs, img_dim,
                                                     tofbin_width, sigma_tof, tofcenter_offset,
                                                     nsigmas, tofbin)
    return ok
|
import random
import math
import logging
from slime_mind.engine.plant import Plant
from slime_mind.engine.slime import Slime
class Sprite_man():
    """Map-wide sprite bookkeeping: removes dead pieces, merges slimes and
    lets plants seed into adjacent empty cells."""

    def __init__(self, map, config):
        self.map = map
        self.conf = config

    def check_for_dead(self):
        """Remove every Plant/Slime whose HP has dropped to zero or below."""
        for x in range(self.map.columns):
            for y in range(self.map.rows):
                gamepiece = self.map.get(x, y)
                if type(gamepiece) is Plant or type(gamepiece) is Slime:
                    if gamepiece.current_hp <= 0:
                        self.map.clear_cell(gamepiece.x, gamepiece.y)

    def check_for_merge(self):
        """Merge adjacent, same-player slimes that are both ready to merge."""
        for x in range(self.map.columns):
            for y in range(self.map.rows):
                gamepiece = self.map.get(x, y)
                if type(gamepiece) is Slime and gamepiece.ready_to_merge:
                    # BUG FIX: the neighbor scan previously rebound x/y,
                    # clobbering the outer grid-scan coordinates and making
                    # subsequent self.map.get(x, y) lookups read wrong cells.
                    for nx, ny in self.map.adjacent_cells(gamepiece.x, gamepiece.y):
                        neighbor = self.map.get(nx, ny)
                        if type(neighbor) is Slime and neighbor.ready_to_merge and gamepiece.player_id == neighbor.player_id:
                            logging.getLogger().info('%s merged with %s', gamepiece.id, neighbor.id)
                            gamepiece.xp = math.floor(1.5 * (gamepiece.xp + neighbor.xp))
                            # kill the merged neighbor
                            self.map.clear_cell(neighbor.x, neighbor.y)

    def spread_seeds(self):
        """Give each seed-ready plant a chance to spawn a new plant nearby."""
        for x in range(self.map.columns):
            for y in range(self.map.rows):
                gamepiece = self.map.get(x, y)
                if type(gamepiece) is Plant and gamepiece.can_seed():
                    empty_adjacent_cells = self.map.adjacent_empty_cells(gamepiece.x, gamepiece.y)
                    # can't seed if there are no available cells
                    if len(empty_adjacent_cells) == 0:
                        continue
                    # One-in-(seed_chance + 1) roll for a successful seed.
                    level_up_chance = random.randint(0, self.conf['Plant'].getint('seed_chance'))
                    if level_up_chance == 0:
                        # BUG FIX: choosing the target cell previously rebound
                        # x/y, corrupting the outer scan position.
                        tx, ty = random.choice(empty_adjacent_cells)
                        plant = Plant(self.conf['Plant'])
                        self.map.move_gamepiece(plant, tx, ty)
|
# Record the offsets of the currently selected items, wrapped in command
# history so the operation shows up as an undoable step.
# NOTE(review): `ix` is not imported here -- presumably the Clarisse iFX
# scripting entry point injected by the host application; confirm this
# script only runs inside Clarisse.
ix.enable_command_history()
app = ix.application
ix.api.SdkHelpers.record_offsets_items_selected(app)
ix.disable_command_history()
|
from contextlib import ExitStack
import contextlib
@contextlib.contextmanager
def first():
    """Context manager that prints 'First' on entry and does nothing on exit."""
    print('First')
    yield None
@contextlib.contextmanager
def second():
    """Context manager that prints 'Second' on entry and does nothing on exit."""
    print('Second')
    yield None
# Demo: every pass enters first(); even-numbered passes additionally enter
# second(). ExitStack unwinds whatever was entered, in reverse order, at
# the end of each pass.
for idx in range(5):
    with ExitStack() as stack:
        stack.enter_context(first())
        if idx % 2 == 0:
            stack.enter_context(second())
        print('NUMBER: {}'.format(idx))
|
import sys
sys.path.append(r"C:\src\business-library\python")
from mundusinvicte.networking.sockets.TcpClient import TcpClient
import socket
import os
class WebClient(TcpClient):
    """TCP client wrapper holding a connected socket, its peer address and a
    per-connection context dict.

    NOTE(review): TcpClient.__init__ is never invoked here -- confirm the
    base class needs no initialisation of its own.
    """
    def __init__(self, socket, addr):
        # `socket` is the connected socket object for this client (the
        # parameter shadows the imported socket module inside this method).
        self.sock = socket
        self.addr = addr
        # Free-form per-connection state shared across request handling.
        self.context = {}
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectcert.ui'
#
# Created: Mon Nov 13 12:04:45 2017
# by: PyQt4 UI code generator 4.6.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_CertForm(object):
    """pyuic4-generated layout for the certificate-selection dialog.

    Do not hand-edit beyond comments: regenerating from selectcert.ui will
    overwrite this file (see the header warning).
    """
    def setupUi(self, CertForm):
        """Build the widget tree: a label, a non-editable certificate list
        and Cancel/OK buttons arranged on a grid."""
        CertForm.setObjectName("CertForm")
        CertForm.resize(511, 237)
        self.gridLayout = QtGui.QGridLayout(CertForm)
        self.gridLayout.setObjectName("gridLayout")
        self.cancelButton = QtGui.QPushButton(CertForm)
        self.cancelButton.setObjectName("cancelButton")
        self.gridLayout.addWidget(self.cancelButton, 2, 2, 1, 1)
        self.listView = QtGui.QListView(CertForm)
        self.listView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.listView.setViewMode(QtGui.QListView.ListMode)
        self.listView.setObjectName("listView")
        self.gridLayout.addWidget(self.listView, 1, 0, 1, 4)
        self.okButton = QtGui.QPushButton(CertForm)
        self.okButton.setObjectName("okButton")
        self.gridLayout.addWidget(self.okButton, 2, 3, 1, 1)
        self.label = QtGui.QLabel(CertForm)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        # Spacer pushes the two buttons to the right edge of the bottom row.
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 2, 0, 1, 2)
        self.retranslateUi(CertForm)
        QtCore.QMetaObject.connectSlotsByName(CertForm)
    def retranslateUi(self, CertForm):
        """Apply the (Russian) display strings to the widgets."""
        CertForm.setWindowTitle(QtGui.QApplication.translate("CertForm", "Выбор сертификата", None, QtGui.QApplication.UnicodeUTF8))
        self.cancelButton.setText(QtGui.QApplication.translate("CertForm", "Отмена", None, QtGui.QApplication.UnicodeUTF8))
        self.okButton.setText(QtGui.QApplication.translate("CertForm", "Далее", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("CertForm", "Выберите сертификат", None, QtGui.QApplication.UnicodeUTF8))
|
# Advent of Code 2020 day 16, part 1: compute the "ticket scanning error
# rate" -- the sum of nearby-ticket values that fit none of the fields'
# valid ranges. Input is read from stdin in the puzzle's format.
range_list = []
# Fields section: lines like "name: a-b or c-d" until the first blank line.
while (line := input()) != '':
    _, value_text = line.split(': ')
    first_range, second_range = value_text.split(' or ')
    a, b = map(int, first_range.split('-'))
    c, d = map(int, second_range.split('-'))
    range_list.append((a, b))
    range_list.append((c, d))
# Sort ranges by lower bound so a single forward sweep can match sorted
# ticket values against them.
range_list.sort()
_ = input()  # "your ticket:" header
my_ticket = list(map(int, input().split(',')))  # parsed but unused in part 1
_ = input()  # blank line
_ = input()  # "nearby tickets:" header
error_rate = 0
while True:
    try:
        line = input()
    except EOFError:
        break
    ticket = sorted(list(map(int, line.split(','))))
    # Two-pointer sweep: ticket values and range starts are both ascending.
    i, j = 0, 0
    while i < len(ticket) and j < len(range_list):
        if range_list[j][0] <= ticket[i] <= range_list[j][1]:
            i += 1  # value fits the current range
        elif ticket[i] < range_list[j][0]:
            # Below every remaining range's start: fits nothing -- invalid.
            error_rate += ticket[i]
            i += 1
        else:
            j += 1  # value is past this range's end; try the next range
    # Values left after the ranges are exhausted also fit nothing.
    for v in ticket[i:]:
        error_rate += v
print(error_rate)
|
from ....models.models import Committee
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("committee.delete")
class CommitteeDeleteAction(DeleteAction):
    """
    Action to delete a committee.
    """

    # Model instance the generic DeleteAction operates on.
    model = Committee()
    # Request payload schema: the framework's standard delete schema for Committee.
    schema = DefaultSchema(Committee()).get_delete_schema()
|
import db
import time
import configs
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base, Session
class Test(declarative_base()):
    """ORM mapping for the `test` table used by the row-locking demo."""
    __tablename__ = 'test'
    # Surrogate, auto-incrementing primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Work-item state flag; the script below claims rows with state == 1
    # and marks them as 2.
    state = Column(Integer, nullable=False)
# Demo of SELECT ... FOR UPDATE SKIP LOCKED: claim one row with state == 1,
# hold its row lock for 30 s (so a concurrently running copy of this script
# skips the locked row instead of blocking), then mark it as state 2.
config = configs.config('configs')
db_conf = db.get_database_config(config)
db_engine = db.db_connect(db_conf)
session = Session(bind=db_engine)
session.begin()
# skip_locked=True makes the SELECT ignore rows already locked by others.
test: Test = session \
    .query(Test) \
    .with_for_update(skip_locked=True) \
    .filter_by(state=1) \
    .first()
if test is None:
    print('No entry found.')
else:
    print(test.id)
    test.state = 2
    # Keep the transaction (and therefore the row lock) open long enough
    # to observe the skip-locked behaviour from a second process.
    time.sleep(30)
    session.add(test)
session.commit()
session.close()
|
from ci_framework import FlopyTestSetup, base_test_dir
import flopy
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)


def test_vdf_vsc():
    """Exercise several valid ways of instantiating the SEAWAT VSC package
    on a tiny throwaway model, writing the input files for each variant."""
    model_ws = f"{base_dir}_test_vdf_vsc"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)

    nlay = 3
    nrow = 4
    ncol = 5
    nper = 3
    m = flopy.seawat.Seawat(modelname="vdftest", model_ws=model_ws)
    dis = flopy.modflow.ModflowDis(
        m, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper
    )
    vdf = flopy.seawat.SeawatVdf(m)

    # Test different variations of instantiating vsc; each variant is
    # written out and then removed so the next one starts from a clean model.
    for vsc_kwargs in (
        {},
        {"mt3dmuflg": 0},
        {"mt3dmuflg": 0, "mtmutempspec": 0},
        {"mt3dmuflg": -1},
        {"mt3dmuflg": -1, "nsmueos": 1},
        {"mt3dmuflg": 1},
    ):
        vsc = flopy.seawat.SeawatVsc(m, **vsc_kwargs)
        m.write_input()
        m.remove_package("VSC")
    return


if __name__ == "__main__":
    test_vdf_vsc()
|
"""Miscellaneous functions used in mechpy."""
import numpy as np
def matrix_minor(matrix, indices):
    """Return the minor of a square 2D ``numpy.ndarray``.

    Parameters
    ----------
    matrix : 2D numpy.ndarray
        the square matrix from which the minor is taken
    indices : tuple of ints
        (row, column) to remove

    Returns
    -------
    numpy.ndarray
        ``matrix`` with the given row and column deleted

    Raises
    ------
    ValueError
        if ``indices`` does not have exactly two entries, or if ``matrix``
        is not a square 2D array
    """
    if len(indices) != 2:
        raise ValueError("`indices` must be a tuple of two integers")
    if matrix.ndim != 2 or len(matrix) != len(matrix[0]):
        raise ValueError("`matrix` must be a square 2D array")
    row, col = indices[0], indices[1]
    without_row = np.delete(matrix, row, axis=0)
    return np.delete(without_row, col, axis=1)
def ms(applied, allowed, knockdown=1):
    r"""Calculate the margin of safety.

    .. math::

       MS = \frac{F_{\text{app}}}{k F_{\text{alw}}} - 1

    where :math:`F_{\text{app}}` is the applied load, :math:`F_{\text{alw}}`
    the allowable and :math:`k` the knockdown factor.

    .. note::
       ``applied`` and ``allowed`` must be expressed in the same units.

    NOTE(review): this definition divides applied by allowable, which is
    the inverse of the common MS = allowable/applied - 1 convention; it
    does match the formula documented above, so callers presumably rely
    on it -- confirm before changing.

    Parameters
    ----------
    applied : float
        applied stress, load, or strain
    allowed : float
        the allowable strength
    knockdown : float
        allowable knockdown factor (``default=1``)

    Returns
    -------
    float
        the margin of safety

    Notes
    -----
    A failure index can be used instead by passing it as ``applied`` with
    ``allowed=1``.
    """
    knocked_down_allowable = knockdown * allowed
    return (applied / knocked_down_allowable) - 1
|
"""
This file copied from the OpenAI Baseline and slightly adjusted (for stable loss function, see results -> ppo2)
Source: https://github.com/openai/baselines
"""
import tensorflow as tf
import numpy as np
def ortho_init(scale=1.0):
    """Return an initializer producing (semi-)orthogonal weight matrices
    scaled by *scale* (lasagne-style orthogonal init, adapted for TF)."""
    def _ortho_init(shape, dtype):
        # Collapse the shape to 2D so the SVD yields an orthogonal factor.
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:  # assumes NHWC
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        ortho = u if u.shape == flat_shape else v  # pick the one with the correct shape
        ortho = ortho.reshape(shape)
        return (scale * ortho[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def fc(input_shape, scope, nh, *, init_scale=1.0, init_bias=0.0):
    """Create (and eagerly build the weights of) a Dense layer of `nh` units
    with orthogonal kernel init and constant bias init, under name scope
    `scope`. Returns the built Keras layer."""
    with tf.name_scope(scope):
        layer = tf.keras.layers.Dense(units=nh, kernel_initializer=ortho_init(init_scale),
                                      bias_initializer=tf.keras.initializers.Constant(init_bias))
        # Build immediately so the variables exist before the first call.
        layer.build(input_shape)
        return layer
class Pd(object):
    """
    A particular probability distribution

    Base class: subclasses provide flatparam() and neglogp(); the helpers
    here are derived from those.
    """
    def logp(self, x):
        # Log-likelihood, derived from the subclass's neglogp().
        return - self.neglogp(x)
    def get_shape(self):
        return self.flatparam().shape
    @property
    def shape(self):
        return self.get_shape()
    def __getitem__(self, idx):
        # Index/slice the batch by slicing the flat parameter tensor.
        return self.__class__(self.flatparam()[idx])
class PdType(tf.Module):
    """
    Parametrized family of probability distributions

    Subclasses provide pdclass(); pdfromflat() then instantiates a
    distribution from a flat parameter tensor.
    """
    def pdfromflat(self, flat):
        return self.pdclass()(flat)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (Python sets __hash__ to None) -- confirm no caller
        # puts PdTypes in sets/dict keys.
        return (type(self) == type(other)) and (self.__dict__ == other.__dict__)
class CategoricalPdType(PdType):
    """Family of categorical distributions over `ncat` discrete actions,
    parametrized by logits projected from a latent vector."""
    def __init__(self, latent_shape, ncat, init_scale=1.0, init_bias=0.0):
        self.ncat = ncat
        # Projection from latent to logits; identity if sizes already match.
        self.matching_fc = _matching_fc(latent_shape, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias)
    def pdclass(self):
        return CategoricalPd
    def pdfromlatent(self, latent_vector):
        # Returns (distribution, raw logits).
        pdparam = self.matching_fc(latent_vector)
        return self.pdfromflat(pdparam), pdparam
    def param_shape(self):
        return [self.ncat]
    def sample_shape(self):
        # Samples are scalar category indices.
        return []
    def sample_dtype(self):
        return tf.int32
class CategoricalPd(Pd):
    """Categorical distribution parametrized by (unnormalized) logits.

    The trailing axis of `logits` indexes the categories; any leading axes
    are batch dimensions.
    """
    def __init__(self, logits):
        self.logits = logits
    def flatparam(self):
        return self.logits
    def mode(self):
        # Most probable category.
        return tf.argmax(self.logits, axis=-1)
    @property
    def mean(self):
        return tf.nn.softmax(self.logits)
    def neglogp(self, x):
        """Negative log-likelihood of `x` (integer labels or already
        one-hot/soft labels)."""
        # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
        # Note: we can't use sparse_softmax_cross_entropy_with_logits because
        # the implementation does not allow second-order derivatives...
        if x.dtype in {tf.uint8, tf.int32, tf.int64}:
            # one-hot encoding
            x_shape_list = x.shape.as_list()
            logits_shape_list = self.logits.get_shape().as_list()[:-1]
            for xs, ls in zip(x_shape_list, logits_shape_list):
                if xs is not None and ls is not None:
                    assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls)
            x = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
        else:
            # already encoded
            # (fix: removed a leftover debug print of the logits tensor here)
            assert x.shape.as_list() == self.logits.shape.as_list()
        return tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
    def kl(self, other):
        """KL(self || other), numerically stabilised by subtracting the
        per-row max logit before exponentiating."""
        a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
        a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        ea1 = tf.exp(a1)
        z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
        z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (a0 - tf.math.log(z0) - a1 + tf.math.log(z1)), axis=-1)
    def entropy(self):
        # Same max-subtraction stabilisation as kl().
        a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=-1)
    def sample(self):
        # Gumbel-max sampling: argmax over logits perturbed by -log(-log(U)).
        # NOTE(review): seed=0 fixes the op-level noise stream -- presumably
        # a deliberate part of the "stable loss" adjustment mentioned in the
        # module header; confirm before removing.
        u = tf.random.uniform(tf.shape(self.logits), dtype=self.logits.dtype, seed=0)
        return tf.argmax(self.logits - tf.math.log(-tf.math.log(u)), axis=-1)
    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
def _matching_fc(tensor_shape, name, size, init_scale, init_bias):
    """Return the identity when the last dimension already equals `size`,
    otherwise a freshly built fc layer projecting to `size` units."""
    if tensor_shape[-1] == size:
        return lambda x: x
    return fc(tensor_shape, name, size, init_scale=init_scale, init_bias=init_bias)
|
import json
import os
import shutil
import typing as t
from abc import ABC, abstractmethod
import boto3
import requests
from botocore.config import Config
from pydantic import BaseModel
from test.config import FIRESTORE_EMULATOR_HOST
def load_json_file(path) -> t.Any:
    """Parse the JSON document at *path* and return the resulting object."""
    with open(path) as handle:
        return json.load(handle)
class FileSystemObjectSpec(ABC):
    """Abstract description of a filesystem object that can materialise
    itself on disk under an optional parent directory."""
    @abstractmethod
    def write(self, parent: str = ""):
        """Write this object inside *parent* (current directory when empty)."""
        pass
class FileSpec(BaseModel, FileSystemObjectSpec):
    """Declarative description of a single text file and its content."""

    path: str  # should be relative to parent, unless there is no parent
    content: str

    def write(self, parent: str = ""):
        """Write ``content`` to ``os.path.join(parent, self.path)``."""
        target = os.path.join(parent, self.path)
        with open(target, "w") as handle:
            handle.write(self.content)

    @classmethod
    def from_path(cls, path: str, parent: str = "") -> "FileSpec":
        """
        `parent` will be excluded from this instance's path attribute,
        keeping the path attribute relative.
        """
        with open(os.path.join(parent, path)) as handle:
            return cls(path=path, content=handle.read())
class DirSpec(BaseModel, FileSystemObjectSpec):
    """Recursively write this directory and all of its children."""

    path: str  # should be relative to parent, unless there is no parent
    children: t.List[FileSystemObjectSpec]

    class Config:
        arbitrary_types_allowed = True

    def write(self, parent: str = ""):
        """Create this directory under *parent*, then write every child into it."""
        full_path = os.path.join(parent, self.path)
        os.makedirs(full_path)
        for child in self.children:
            child.write(full_path)

    def remove(self, parent: str = ""):
        """Delete this directory tree from under *parent*."""
        shutil.rmtree(os.path.join(parent, self.path))

    @classmethod
    def from_path(cls, path: str, parent: str = "") -> "DirSpec":
        """
        Build out a `DirSpec` instance for the directory at
        `os.path.join(parent, path)`, along with all of its children.
        """
        base_path = os.path.join(parent, path)
        spec = cls(path=path, children=[])
        for child_name in os.listdir(base_path):
            child_path = os.path.join(base_path, child_name)
            if os.path.isdir(child_path):
                spec.children.append(cls.from_path(child_name, base_path))
            elif os.path.isfile(child_path):
                spec.children.append(FileSpec.from_path(child_name, base_path))
        return spec
def clear_firestore():
    """Delete every document in the Firestore emulator's test database.

    Source: https://firebase.google.com/docs/emulator-suite/connect_firestore#clear_your_database_between_tests
    """
    url = f"http://{FIRESTORE_EMULATOR_HOST}/emulator/v1/projects/test/databases/(default)/documents"
    response = requests.delete(url)
    response.raise_for_status()
def create_dynamodb_table(table_name: str, *, pk_field="id", sort_key_field=None, sort_key_type=None):
    """Create a DynamoDB table with a hash key (and optional range key),
    then block until the table exists.

    Raises if a sort key field is given without a valid sort key type.
    """
    dynamodb = boto3.resource(
        "dynamodb", endpoint_url=os.getenv("AWS_ENDPOINT"), config=Config(region_name=os.getenv("AWS_REGION"))
    )
    # Scalar attribute types DynamoDB accepts for key attributes.
    # Source: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBMapper.DataTypes.html
    valid_ddb_data_types = ["S", "N", "B", "BOOL"]
    throughput = {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}
    if sort_key_field is None:
        # Simple primary key: hash only.
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{"AttributeName": pk_field, "KeyType": "HASH"}],
            AttributeDefinitions=[{"AttributeName": pk_field, "AttributeType": "S"}],
            ProvisionedThroughput=throughput,
        )
    else:
        if sort_key_type is None or sort_key_type not in valid_ddb_data_types:
            raise Exception("sort key data type is either not provided or it is a wrong type")
        # Composite primary key: hash + range.
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=[
                {"AttributeName": pk_field, "KeyType": "HASH"},
                {"AttributeName": sort_key_field, "KeyType": "RANGE"},
            ],
            AttributeDefinitions=[
                {"AttributeName": pk_field, "AttributeType": "S"},
                {"AttributeName": sort_key_field, "AttributeType": sort_key_type},
            ],
            ProvisionedThroughput=throughput,
        )
    table.meta.client.get_waiter("table_exists").wait(TableName=table_name)
    return table
|
from django.db import models
import random
# Create your models here.
class PostEntryQuery(models.QuerySet):
    """Custom queryset helpers for blog post entries."""

    def published(self):
        """Only entries flagged as published."""
        return self.filter(published=True)

    def randomPost(self):
        """Up to four published entries in random database order."""
        return self.published().order_by('?')[:4]

    def getFeaturedPost(self):
        """One randomly chosen published entry."""
        return random.choice(self.published())
class PostTag(models.Model):
    # A reusable label that can be attached to posts (see PostEntry.tags).
    tag = models.CharField(max_length=30)
    def __str__(self):
        return self.tag
    # class Meta:
    # 	app_label = 'terryblog'
class PostEntry(models.Model):
    # A single blog post with hero/body images, tags and a publish flag.
    title = models.CharField(max_length=200)
    hero_image = models.ImageField(upload_to='terryblog/media/')
    post_body = models.TextField()
    post_image = models.ImageField(upload_to='terryblog/media/', blank=True)  # optional inline image
    author = models.CharField(max_length=30)
    created = models.DateTimeField('date published')
    slug = models.SlugField(max_length=200, unique=True)  # unique URL fragment
    published = models.BooleanField(default=True)
    tags = models.ManyToManyField(PostTag)
    # Custom queryset (published/randomPost/getFeaturedPost helpers).
    objects = PostEntryQuery.as_manager()
    def __str__(self):
        return self.title
    # class Meta:
    # 	app_label = 'terryblog'
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from ...views.index_views import user_login
from app.models import TheUser
# ----------------------------------------------------------------------------------------------------------------------
class IndexViewsTestCase(TestCase):
    """API tests for the ``user_login`` index view.

    Covers request validation (missing, too-long/short and malformed
    fields), authentication failures, and successful logins via either
    username or email.
    """

    @classmethod
    def setUpTestData(cls):
        # One known account, shared read-only by every test below.
        cls.user = User.objects.create_user(username='api_login', email='api_login@email.com', password='123456')
        cls.the_user = TheUser.objects.get(id_user=cls.user)
        cls.client = APIClient()
        cls.api_key = settings.API_SECRET_KEY

    # ------------------------------------------------------------------------------------------------------------------
    def _post_login(self, **params):
        """POST to the login API with the app key merged into *params*."""
        payload = dict(params, app_key=self.api_key)
        return self.client.post(reverse('user_login_api'), payload)

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_missing_params(self):
        """A request without a password is rejected with 400."""
        response = self._post_login(username='username')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['detail'], {'password': ['This field is required.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_too_long_username(self):
        """Usernames longer than 30 characters are rejected."""
        response = self._post_login(username='a' * 40, password='somepassword')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['detail'], {'username': ['Ensure this field has no more than 30 characters.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_too_short_username(self):
        """Usernames shorter than 2 characters are rejected."""
        response = self._post_login(username='a', password='somepassword')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['detail'], {'username': ['Ensure this field has at least 2 characters.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_username_regex_not_valid(self):
        """Usernames with forbidden characters fail pattern validation."""
        username_patterns = [
            'ab#$@cdev', '#$@username', 'username%#&#&', 'db24!!!db34', '#$@234234', '#123dkf%'
        ]
        for pattern in username_patterns:
            with self.subTest(pattern=pattern):
                response = self._post_login(username=pattern, password='somepassword')
                self.assertEqual(response.resolver_match.func, user_login)
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.data['detail'],
                                 {'username': ['This value does not match the required pattern.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_email_regex_not_valid(self):
        """Malformed email addresses fail pattern validation."""
        email_patterns = [
            'no_extension@ddd', '@first.missing', 'after_at_miss@', '$%#@474**.om', 'em#$@ail@m.com', '#em@ail@m.com'
        ]
        for pattern in email_patterns:
            with self.subTest(pattern=pattern):
                response = self._post_login(username=pattern, password='somepassword')
                self.assertEqual(response.resolver_match.func, user_login)
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.data['detail'],
                                 {'username': ['This value does not match the required pattern.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_too_long_password(self):
        """Passwords longer than 16 characters are rejected."""
        response = self._post_login(username='test_username', password='p' * 17)
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['detail'], {'password': ['Ensure this field has no more than 16 characters.']})

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_valid_username_user_not_exists(self):
        """A well-formed but unknown username yields 404 and no token."""
        response = self._post_login(username='test_username', password='password')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.data['data']['token'], None)
        self.assertEqual(response.data['detail'], 'not authenticated')

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_valid_email_user_not_exists(self):
        """A well-formed but unknown email yields 404 and no token."""
        response = self._post_login(username='api_login_email@email.com', password='123456')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.data['data']['token'], None)
        self.assertEqual(response.data['detail'], 'not authenticated')

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_success_with_username(self):
        """Correct username/password returns 200 and the user's auth token."""
        response = self._post_login(username='api_login', password='123456')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['data']['token'], self.the_user.auth_token)
        self.assertEqual(response.data['detail'], 'successful')

    # ------------------------------------------------------------------------------------------------------------------
    def test_user_login_success_with_email(self):
        """Correct email/password returns 200 and the user's auth token."""
        response = self._post_login(username='api_login@email.com', password='123456')
        self.assertEqual(response.resolver_match.func, user_login)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['data']['token'], self.the_user.auth_token)
        self.assertEqual(response.data['detail'], 'successful')
|
from xml.dom import minidom
from lxml import etree
import xml.etree.ElementTree as ET
from io import StringIO
import copy
import random
###TODO when update, also put in node thingy
# https://kite.com/python/examples/3486/xml-set-attributes-of-an-xml-element
# https://kite.com/python/examples/3491/xml-add-an-element-as-the-last-child-of-another-xml-element
# https://stackabuse.com/reading-and-writing-xml-files-in-python/
# P1 duplicate 1 plan 1000 times
# Useful for quick testing that reservation System actually affects the routing
def simplePlansGenerator(in_file="plans.xml", out_file='plans1000simple.xml', count=1000):
    """Duplicate the first <person> of *in_file* so the output has persons
    with ids 2..count-1 appended after the original.

    Useful for quick testing that the reservation system actually affects
    the routing.  Calling with no arguments reproduces the original
    behaviour (read ``plans.xml``, write ``plans1000simple.xml``).

    Parameters
    ----------
    in_file : str   -- source MATSim plans file containing a <person>
    out_file : str  -- destination path
    count : int     -- ids are generated for range(2, count)
    """
    tree = ET.parse(in_file)
    root = tree.getroot()
    person = tree.find('person')
    for x in range(2, count):
        # deepcopy is required: appending the same Element object would give
        # every copy the same id attribute, which MATSim rejects.
        new_person = copy.deepcopy(person)
        new_person.set('id', str(x))
        root.append(new_person)
    # Pass the path so ElementTree opens and closes the file itself
    # (bug fix: the previous open(..., 'w') handle was never closed).
    tree.write(out_file, encoding='unicode')
# def complexPlansGenerator():
###TODO when update, also put in node thingy
def randomlyChangeMode(file, chance, out_file=None):
    """With probability *chance* per <person>, set every <leg> of that
    person's <plan> to mode 'rcar' and write the modified plans file.

    Parameters
    ----------
    file : str      -- source MATSim plans file
    chance : float  -- per-person probability of switching modes (0..1)
    out_file : str  -- destination path; defaults to the original
                       hard-coded 'D:/tmp3/...' location for backward
                       compatibility.
    """
    if out_file is None:
        out_file = 'D:/tmp3/plansCPPwTFCT_25pct_fixed_' + str(chance) + '.xml'
    tree = ET.parse(file)
    root = tree.getroot()
    for person in root:
        if random.random() < chance:
            # NOTE(review): assumes every <person> has a <plan> child —
            # person.find('plan') would be None otherwise; confirm inputs.
            legs = person.find('plan').findall('leg')
            for leg in legs:
                leg.set('mode', 'rcar')
    tree.write(out_file)
#simplePlansGenerator()
# Script entry: rewrite ~30% of persons in the fixed 25%-sample plan file
# to the reservation mode, then report completion.
randomlyChangeMode('D:/tmp3/plansCPPwTFCT_25pct_fixed.xml', 0.3)
print("Done")
#!/usr/bin/python
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata and embedded documentation blocks.
# Fix: "ansible_certtificate" typo corrected (both occurrences, keeping the
# add/delete examples consistent) and stray space in "private_key :" removed
# so the YAML key parses cleanly.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_load_balancer_certificate
short_description: Add or remove a SSL certificate from a load balancer in
                   OCI Load Balancing Service
description:
    - Add a SSL certificate to OCI Load Balancer
    - Delete a SSL certificate, if present.
version_added: "2.5"
options:
    load_balancer_id:
        description: Identifier of the Load Balancer in which the certificate belongs
        required: true
        aliases: ['id']
    name:
        description: The name of the certificate to add to the load balancer.
        required: true
    ca_certificate:
        description: The Certificate Authority certificate, or any interim certificate,
                     that you received from your SSL certificate provider. The absolute
                     path of the certificate file should be provided.
        required: false
    passphrase:
        description: A passphrase for encrypted private keys. This is needed only if you
                     created your certificate with a passphrase.
        required: false
    private_key:
        description: The SSL private key for your certificate, in PEM format.The absolute
                     path of the private key file should be provided.
        required: false
    public_certificate:
        description: The public certificate, in PEM format, that you received
                     from your SSL certificate provider. The absolute
                     path of the public certificate file should be provided.
        required: false
    state:
        description: Create or delete certificate. For I(state=present),
                     if it does not exists, it gets added.
        required: false
        default: 'present'
        choices: ['present','absent']
author:
    - "Debayan Gupta(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_wait_options ]
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# Add a certificate bundle (without passphrase) to a loadbalancer
- name: Add a certificate bundle (without passphrase) to a loadbalancer
  oci_load_balancer_certificate:
    load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
    name: "ansible_certificate"
    ca_certificate: "certificate_src/ca_cert.pem"
    private_key: "certificate_src/private_key.pem"
    public_certificate: "certificate_src/cert.pem"
    state: 'present'
# Add a certificate bundle (with a passphrase for encrypted private keys) to a load balancer
- name: Create certificate with Passphrase
  oci_load_balancer_certificate:
    name: "ansible_cert_with_passphrase"
    load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
    ca_certificate: "certificate_src/ca_cert.pem"
    passphrase: "ansible"
    private_key: "certificate_src/private_key_with_passphrase.pem"
    public_certificate: "certificate_src/cert_with_passphrase.pem"
    state: 'present'
# Delete a SSL Certificate from a load balancer
- name: Delete a SSL certificate
  oci_load_balancer_certificate:
    load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
    name: "ansible_certificate"
    state: 'absent'
"""
RETURN = """
    certificate:
        description: Attributes of the created certificate.
                     For delete, deleted certificate description will
                     be returned.
        returned: success
        type: complex
        contains:
            certificate_name:
                description: Name of the certificate
                returned: always
                type: string
                sample: ansible_certificate
            ca_certificate:
                description: The Certificate Authority certificate, or any interim certificate,
                             that you received from your SSL certificate provider.
                returned: always
                type: string
                sample: -----BEGIN CERTIFICATE-----
                        MIIDlTCCA
                        -----END CERTIFICATE-----
            public_certificate:
                description: The public certificate, in PEM format, that you received from
                             your SSL certificate provider.
                returned: always
                type: string
                sample: -----BEGIN CERTIFICATE-----
                            MIIDlTCCAn
                        -----END CERTIFICATE-----
        sample: {
                   "ca_certificate":"-----BEGIN CERTIFICATE-----\\nMIIDlTCCAn2gAw\\n-----END CERTIFICATE-----",
                   "certificate_name":"ansible_cert",
                   "public_certificate":"-----BEGIN CERTIFICATE-----\\nMIIDPjCCAiYCCQC5OEUUNtrC\\n-----END CERTIFICATE-----"
                }
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils, oci_lb_utils
try:
from oci.load_balancer.load_balancer_client import LoadBalancerClient
from oci.util import to_dict
from oci.load_balancer.models import CreateCertificateDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
def create_certificate(lb_client, module):
    """Add the requested SSL certificate to the load balancer.

    Idempotent: if a certificate with the same name and identical attributes
    already exists, nothing changes; if one exists with the same name but
    different attributes, the module fails (certificates are immutable).
    Returns a dict with ``changed`` and ``certificate`` keys.
    """
    result = dict(changed=False, certificate="")
    lb_id = module.params.get("load_balancer_id")
    name = module.params.get("name")
    # Look up any existing certificate with this name on the LB.
    certificate = oci_lb_utils.get_certificate(lb_client, module, lb_id, name)
    create_certificate_details = oci_lb_utils.get_create_certificate_details(
        module, name
    )
    same_certificate = False
    if certificate is not None:
        same_certificate = oci_lb_utils.is_same_certificate(
            create_certificate_details, certificate
        )
        if same_certificate:
            # No-op: identical certificate already present.
            get_logger().info(
                "Certificate %s with same attribute values already available", name
            )
            result["changed"] = False
            result["certificate"] = to_dict(certificate)
        else:
            # Same name, different content: cannot update in place.
            get_logger().error(
                "Certificate %s with different attribute value already available in load balancer %s",
                name,
                lb_id,
            )
            module.fail_json(
                msg="Certificate "
                + name
                + " with different attribute value already available in "
                "load balancer " + lb_id
            )
    # Reached when no certificate with this name exists (fail_json above
    # terminates the different-attributes case).
    if not same_certificate:
        get_logger().info(
            "Creating certificate %s in the load balancer %s", name, lb_id
        )
        result = oci_lb_utils.create_or_update_lb_resources_and_wait(
            resource_type="certificate",
            function=lb_client.create_certificate,
            kwargs_function={
                "create_certificate_details": create_certificate_details,
                "load_balancer_id": lb_id,
            },
            lb_client=lb_client,
            get_sub_resource_fn=oci_lb_utils.get_certificate,
            kwargs_get={
                "lb_client": lb_client,
                "module": module,
                "lb_id": lb_id,
                "name": name,
            },
            module=module,
        )
        get_logger().info(
            "Successfully created certificate %s in the load balancer %s", name, lb_id
        )
    return result
def delete_certificate(lb_client, module):
    """Remove the named SSL certificate from the load balancer and wait
    for the work request to finish.  Returns the helper's result dict.
    """
    lb_id = module.params.get("load_balancer_id")
    name = module.params.get("name")
    get_logger().info("Deleting certificate %s from the load balancer %s", name, lb_id)
    delete_kwargs = {"certificate_name": name, "load_balancer_id": lb_id}
    lookup_kwargs = {
        "lb_client": lb_client,
        "module": module,
        "lb_id": lb_id,
        "name": name,
    }
    result = oci_lb_utils.delete_lb_resources_and_wait(
        resource_type="certificate",
        function=lb_client.delete_certificate,
        kwargs_function=delete_kwargs,
        lb_client=lb_client,
        get_sub_resource_fn=oci_lb_utils.get_certificate,
        kwargs_get=lookup_kwargs,
        module=module,
    )
    get_logger().info(
        "Successfully deleted certificate %s from the load balancer %s", name, lb_id
    )
    return result
def set_logger(input_logger):
    """Install *input_logger* as this module's shared logger."""
    global logger
    logger = input_logger


def get_logger():
    """Return the logger previously installed via ``set_logger``."""
    return logger
def main():
    """Module entry point: parse arguments, then create or delete the
    certificate depending on ``state``."""
    logger = oci_utils.get_logger("oci_load_balancer_certificate")
    set_logger(logger)
    # Common OCI arguments (auth, wait options) extended with this module's.
    module_args = oci_utils.get_common_arg_spec(supports_wait=True)
    module_args.update(
        dict(
            name=dict(type="str", required=True),
            load_balancer_id=dict(type="str", required=True, aliases=["id"]),
            ca_certificate=dict(type="str", required=False),
            passphrase=dict(type="str", required=False, no_log=True),
            private_key=dict(type="str", required=False),
            public_certificate=dict(type="str", required=False),
            state=dict(
                type="str",
                required=False,
                default="present",
                choices=["present", "absent"],
            ),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module")
    lb_client = oci_utils.create_service_client(module, LoadBalancerClient)
    state = module.params["state"]
    if state == "present":
        # Helper skips creation when an equivalent certificate already exists.
        result = oci_utils.check_and_create_resource(
            resource_type="certificate",
            create_fn=create_certificate,
            kwargs_create={"lb_client": lb_client, "module": module},
            list_fn=lb_client.list_certificates,
            kwargs_list={"load_balancer_id": module.params.get("load_balancer_id")},
            module=module,
            model=CreateCertificateDetails(),
        )
    elif state == "absent":
        result = delete_certificate(lb_client, module)
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
|
# -*- coding: iso-8859-1 -*-
"""Output module.
"""
# Copyright (C) 2004, 2005, 2006, 2010 Juan M. Bello Rivas <jmbr@superadditive.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import Halberd.logger
import Halberd.clues.analysis as analysis
def report(scantask):
    """Displays detailed report information to the user.

    Writes to ``scantask.out`` (opened in append mode) when set, otherwise
    to standard output.  Bug fix: the output file is now closed when this
    function opened it (it was previously leaked); sys.stdout is never
    closed.
    """
    if scantask.out:
        out = open(scantask.out, 'a')
    else:
        out = sys.stdout
    try:
        clues = scantask.analyzed
        hits = analysis.hits(clues)
        # NOTE(review): `logger` is never used below — kept to preserve any
        # side effects of getLogger(); confirm whether it can be dropped.
        logger = Halberd.logger.getLogger()
        # xxx This could be passed by the caller in order to avoid
        # recomputation in case the clues needed a re-analysis.
        diff_fields = analysis.diff_fields(clues)
        out.write('=' * 70 + '\n')
        out.write('%s' % scantask.url)
        if scantask.addr:
            out.write(' (%s)' % scantask.addr)
        out.write(': %d real server(s)\n' % len(clues))
        out.write('=' * 70 + '\n')
        for num, clue in enumerate(clues):
            assert hits > 0
            info = clue.info
            out.write('\n')
            out.write('server %d: %s\n' % (num + 1, info['server'].lstrip()))
            out.write('-' * 70 + '\n\n')
            out.write('difference: %d seconds\n' % clue.diff)
            out.write('successful requests: %d hits (%.2f%%)\n' \
                      % (clue.getCount(), clue.getCount() * 100 / float(hits)))
            if info['contloc']:
                out.write('content-location: %s\n' % info['contloc'].lstrip())
            if len(info['cookies']) > 0:
                out.write('cookie(s):\n')
                for cookie in info['cookies']:
                    out.write('  %s\n' % cookie.lstrip())
            out.write('header fingerprint: %s\n' % info['digest'])
            different = [(field, value) for field, value in clue.headers \
                         if field in diff_fields]
            if different:
                out.write('different headers:\n')
                idx = 1
                for field, value in different:
                    out.write('  %d. %s:%s\n' % (idx, field, value))
                    idx += 1
            if scantask.debug:
                import pprint
                out.write('headers:\n')
                pprint.pprint(clue.headers, out)
    finally:
        # Close only handles we opened ourselves; never close sys.stdout.
        if out is not sys.stdout:
            out.close()
# vim: ts=4 sw=4 et
|
import torch.nn as nn
import pretrainedmodels
from mlcomp.contrib.torch.layers import LambdaLayer
class Pretrained(nn.Module):
    """Wrapper around a ``pretrainedmodels`` backbone whose final
    ``last_linear`` layer is replaced to emit ``num_classes`` outputs.

    Parameters: ``variant`` names the backbone constructor in
    ``pretrainedmodels``; ``activation`` may be None, a callable,
    'softmax' or 'sigmoid'.
    """
    def __init__(self, variant, num_classes, pretrained=True, activation=None):
        super().__init__()
        # pretrainedmodels constructors default to ImageNet weights; passing
        # pretrained=None disables the weight download.
        params = {'num_classes': 1000}
        if not pretrained:
            params['pretrained'] = None
        model = pretrainedmodels.__dict__[variant](**params)
        self.model = model
        linear = self.model.last_linear
        if isinstance(linear, nn.Linear):
            # Fresh Linear head sized for this task.
            self.model.last_linear = nn.Linear(
                model.last_linear.in_features,
                num_classes
            )
            # Mirror Conv2d naming so downstream code can read in_channels
            # on either head type.
            self.model.last_linear.in_channels = linear.in_features
        elif isinstance(linear, nn.Conv2d):
            # Fresh 1x1-equivalent Conv2d head with the same kernel size.
            self.model.last_linear = nn.Conv2d(
                linear.in_channels,
                num_classes,
                kernel_size=linear.kernel_size,
                bias=True
            )
            self.model.last_linear.in_features = linear.in_channels
            # NOTE(review): the conv head is then wrapped so the input is
            # average-pooled to `in_channels` and flattened first — confirm
            # this matches the expected feature shape for conv-headed models.
            self.model.last_linear = nn.Sequential(
                LambdaLayer(lambda x: x.unsqueeze_(0)),
                nn.AdaptiveAvgPool1d(self.model.last_linear.in_channels),
                LambdaLayer(lambda x: x.squeeze_(0).view(x.size(0), -1)),
                self.model.last_linear
            )
        if callable(activation) or activation is None:
            self.activation = activation
        elif activation == 'softmax':
            self.activation = nn.Softmax(dim=1)
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        else:
            raise ValueError(
                'Activation should be "sigmoid"/"softmax"/callable/None')
    def forward(self, x):
        # Some backbones return (logits, aux); keep only the primary output.
        res = self.model(x)
        if isinstance(res, tuple):
            res = res[0]
        if self.activation:
            res = self.activation(res)
        return res
__all__ = ['Pretrained']
|
from typing import List
from fastapi import File, UploadFile
from pydantic import BaseModel
class Text(BaseModel):
    # Request/response body carrying a single raw text string.
    text: str
class Words(BaseModel):
    # Request/response body carrying a list of words.
    words: List[str]
class ImageFile(BaseModel):
    # Wrapper for an uploaded (image) file via FastAPI's UploadFile.
    file: UploadFile = File(...)
class ExtractedNER(BaseModel):
    # One extracted named-entity span.
    text: str   # surface form of the entity
    start: int  # span start offset (presumably character-based — confirm)
    end: int    # span end offset
    label: str
    type: str
class DBConnectionInfo(BaseModel):
    # Credentials and location of a database to connect to.
    host: str
    port: str
    user: str
    password: str
    dbname: str
    # text_table_name: str = None
    # tag_table_name: str = None
    # text_col_name: str = None
    # features_col_name: str = None
    # labels_col_name: str = None
    # start_id: int = None
class DBServer(BaseModel):
    # Database server credentials (note: ``db_name`` here vs.
    # ``dbname`` in DBConnectionInfo — confirm which callers expect).
    host: str
    port: str
    user: str
    password: str
    db_name: str
class Tokens(BaseModel):
    # A single annotated token/span with its class information.
    start: int
    end: int
    text: str
    type: str
    classId: str
    label: str
class AnnotatedData(BaseModel):
    # One annotated record plus the DB location it should be stored in.
    id: int
    tokens: str
    labels: str
    features_col_name: str
    labels_col_name: str
    text_table_name: str
    connection_info: DBConnectionInfo
class StorageAuthentication(BaseModel):
    # Access credentials for an object-storage service (S3-style keys).
    host: str
    port: str
    secretKey: str
    accessKey: str
|
# -*- coding: utf-8 -*-
"""
:author @CAB233
:url https://github.com/CAB233/everphoto_checkin
cron: 3 22 * * *
new Env('时光相册');
"""
import json
import requests
from notify_mtr import send
from utils import get_data
class EverPhoto:
    """Daily check-in for EverPhoto (时光相册) accounts."""

    def __init__(self, check_items):
        self.check_items = check_items

    def main(self):
        """Log in and check in for every configured account; return a summary."""
        summary = ""
        auth_url = "https://web.everphoto.cn/api/auth"
        checkin_url = "https://api.everphoto.cn/users/self/checkin/v2"
        for item in self.check_items:
            mobile = item.get("mobile")
            password = item.get("password")
            headers = {}
            credentials = f"mobile={mobile}&password={password}"
            auth_response = requests.post(auth_url, data=credentials, headers=headers)
            token = json.loads(auth_response.text)["data"]["token"]
            headers["authorization"] = "Bearer " + token
            checkin_response = requests.post(checkin_url, headers=headers)
            payload = json.loads(checkin_response.text)["data"]
            summary += (
                "是否为今日第一次签到:" + str(payload["checkin_result"]) + "\n"
                + "累积签到天数:" + str(payload["continuity"]) + "\n\n"
            )
        return summary
if __name__ == "__main__":
    # Load account credentials from the shared config store.
    data = get_data()
    _check_items = data.get("EVERPHOTO", [])
    res = EverPhoto(check_items=_check_items).main()
    print(res)
    # Forward the summary to the configured notification channels.
    send("时光相册", res)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Initialise '.
Must be called before using '.
This commands should be called from the top directory of your project or with
the path to the top directory as an argument.
The command will create a hidden directory called .and copy the default
settings into this directory.
"""
# Copyright (C) 2018-2019 Håkon Austlid Taskén <hakon.tasken@gmail.com>
# Licenced under the MIT License.
import os
import argparse
import sys
import shutil
def command_line_arguments_parser(name_command_line_tool="sim_db",
                                  name_command="init"):
    """Build the argparse parser for the 'init' command.

    The tool/command names are only used for the ``prog`` shown in help.
    """
    parser = argparse.ArgumentParser(
        description=("Initialises 'sim_db' and must be called before using "
                     "'sim_db'. Will create a '.sim_db/' directory."),
        prog="{0} {1}".format(name_command_line_tool, name_command))
    parser.add_argument(
        '--path',
        type=str,
        default=None,
        help=
        ("Path to the top directory of project. If not passed as an "
         "argument, the current working directory is assumed to be the top "
         "directory."))
    return parser


def init(name_command_line_tool="sim_db", name_command="init", argv=None):
    """Create '<path>/.sim_db/' and copy the default settings into it.

    Exits early (without changes) when settings.txt already exists.
    Bug fix: previously a pre-existing '.sim_db/' directory *without*
    settings.txt made ``os.mkdir`` raise FileExistsError; the directory
    is now only created when missing.
    """
    args = command_line_arguments_parser(name_command_line_tool,
                                         name_command).parse_args(argv)
    if args.path is None:
        args.path = os.getcwd()
    elif args.path.endswith('/'):
        args.path = args.path[:-1]
    path_dot_sim_db_dir = os.path.join(args.path, '.sim_db')
    settings_file = os.path.join(path_dot_sim_db_dir, 'settings.txt')
    if os.path.isdir(path_dot_sim_db_dir) and os.path.exists(settings_file):
        print("sim_db is already initialized in {0}/".format(
            path_dot_sim_db_dir))
        exit()
    if not os.path.isdir(path_dot_sim_db_dir):
        os.mkdir(path_dot_sim_db_dir)
    # Default settings ship next to this module.
    path_default_settings = os.path.abspath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'default_settings.txt'))
    shutil.copyfile(path_default_settings, settings_file)
    print("Initialized sim_db directory.")
if __name__ == '__main__':
    # NOTE(review): this passes sys.argv[0] as `name_command` and "" as the
    # tool name — looks odd for help output; confirm the intended wiring.
    init("", sys.argv[0], sys.argv[1:])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, AGB & GC
# Full license can be found in License.md
# ----------------------------------------------------------------------------
""" Functions that specify the boundary location as a function of MLT
Functions
---------
circular(mlt, [r_add])
Return a circular boundary correction for a specified offset
elliptical(mlt, [instrument, method])
Return the ellptical boundary correction for a data set and method
harmonic(mlt, [instrument, method])
Return the harmonic boundary correction for a data set and method
References
----------
Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38, 481-490,
doi:10.5194/angeo-38-481-2020, 2020.
"""
import numpy as np
from ocbpy.ocb_time import hr2rad
def circular(mlt, r_add=0.0):
    """Return a circular boundary correction: a constant offset at all MLT.

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours (only its shape is used)
    r_add : (float)
        Offset added to default radius in degrees. Positive values shift the
        boundary equatorward, whilst negative values shift the boundary
        poleward. (default=0.0)

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at this MLT
    """
    shape = np.asarray(mlt).shape
    return np.full(shape=shape, fill_value=r_add)
def elliptical(mlt, instrument='ampere', method='median'):
    """Elliptical correction to the data boundary (AMPERE only).

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours
    instrument : (str)
        Data set's instrument name (default='ampere')
    method : (str)
        'median' or 'gaussian' coefficient set (default='median')

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at this MLT (negative: poleward shift)

    References
    ----------
    Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38,
    481-490, doi:10.5194/angeo-38-481-2020, 2020.
    """
    if instrument.lower() != 'ampere':
        raise ValueError("no elliptical correction for {:}".format(instrument))
    coeff = {"median": {"a": 4.01, "e": 0.55, "t": -0.92},
             "gaussian": {"a": 4.41, "e": 0.51, "t": -0.95}}
    method = method.lower()
    if method not in coeff:
        raise ValueError("unknown coefficient computation method")
    c = coeff[method]
    mlt_rad = hr2rad(mlt)
    # Ellipse in polar form; negated because this is a poleward correction.
    r_corr = (c["a"] * (1.0 - c["e"] ** 2)
              / (1.0 + c["e"] * np.cos(mlt_rad - c["t"])))
    return -r_corr
def harmonic(mlt, instrument='ampere', method='median'):
    """Harmonic-fit correction to the data boundary (AMPERE only).

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours
    instrument : (str)
        Data set's instrument name (default='ampere')
    method : (str)
        'median' or 'gaussian' coefficient set (default='median')

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at this MLT (negative: poleward shift)

    References
    ----------
    Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38,
    481-490, doi:10.5194/angeo-38-481-2020, 2020.
    """
    if instrument.lower() != 'ampere':
        raise ValueError("no harmonic correction for {:}".format(instrument))
    coeff = {'median': [3.31000535, -0.5452934, -1.24389141, 2.42619653,
                        -0.66677988, -1.03467488, -0.30763009, 0.52426756,
                        0.04359299, 0.60201848, 0.50618522, 1.04360529,
                        0.25186405],
             'gaussian': [3.80100827, 0.98555723, -3.43760943, 1.85084271,
                          -0.36730751, -0.81975654, -1.02823832, 1.30637288,
                          -0.53599218, 0.40380183, -1.22462708, -1.2733629,
                          -0.62743381]}
    method = method.lower()
    if method not in coeff:
        raise ValueError("unknown coefficient computation method")
    c = coeff[method]
    rad_mlt = hr2rad(mlt)
    # Third-order harmonic expansion: c[0] is the constant term and each
    # harmonic k contributes c[4k-3]*cos(k*(mlt+c[4k-2]))
    #                      + c[4k-1]*sin(k*(mlt+c[4k])).
    r_corr = c[0]
    for k in (1, 2, 3):
        r_corr = r_corr + c[4 * k - 3] * np.cos(k * (rad_mlt + c[4 * k - 2]))
        r_corr = r_corr + c[4 * k - 1] * np.sin(k * (rad_mlt + c[4 * k]))
    # Because this is a poleward shift, return the negative of the correction
    return -r_corr
|
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import atexit
import errno
import os
import signal
import sys
def daemonize(pidfile=None, progname=None, stdin='/dev/null',
              stdout='/dev/null', stderr='/dev/null', umask=022):
    """Fork a daemon process.

    Performs the classic Unix double-fork: detach from the controlling
    terminal, become a session leader, and redirect the standard streams
    to the given files.  If `pidfile` is given, refuse to start when
    another instance appears to be running, and keep the file updated
    with the daemon's pid until exit.

    NOTE(review): Python 2 syntax (`except OSError, e`, bare octal
    `022`); this module cannot run unmodified under Python 3.

    :param pidfile: path of the pid file to create/check (optional)
    :param progname: name used in the "already running" message;
        defaults to the script's basename
    :param stdin: file to attach to the daemon's stdin
    :param stdout: file to append the daemon's stdout to
    :param stderr: file to append the daemon's stderr to (unbuffered)
    :param umask: file-mode creation mask applied to the daemon
    """
    if pidfile:
        # Check whether the pid file already exists and refers to a still
        # process running
        pidfile = os.path.abspath(pidfile)
        if os.path.exists(pidfile):
            with open(pidfile) as fileobj:
                try:
                    pid = int(fileobj.read())
                except ValueError:
                    sys.exit('Invalid pid in file %s\nPlease remove it to '
                             'proceed' % pidfile)
            try:  # signal the process to see if it is still running
                # kill(pid, 0) only probes for existence; ESRCH means the
                # process is gone and the stale pid file can be ignored.
                os.kill(pid, 0)
                if not progname:
                    progname = os.path.basename(sys.argv[0])
                sys.exit('%s is already running with pid %s' % (progname, pid))
            except OSError, e:
                if e.errno != errno.ESRCH:
                    raise
        # The pid file must be writable
        try:
            fileobj = open(pidfile, 'a+')
            fileobj.close()
        except IOError, e:
            from trac.util.text import exception_to_unicode
            sys.exit('Error writing to pid file: %s' % exception_to_unicode(e))
    # Perform first fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)  # exit first parent
    # Decouple from parent environment
    os.chdir('/')
    os.umask(umask)
    os.setsid()
    # Perform second fork (prevents the daemon from ever re-acquiring a
    # controlling terminal)
    pid = os.fork()
    if pid > 0:
        sys.exit(0)  # exit second parent
    # The process is now daemonized, redirect standard file descriptors
    for stream in sys.stdout, sys.stderr:
        stream.flush()
    stdin = open(stdin, 'r')
    stdout = open(stdout, 'a+')
    stderr = open(stderr, 'a+', 0)  # 0: unbuffered, errors appear immediately
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
    if pidfile:
        # Register signal handlers to ensure atexit hooks are called on exit
        for signum in [signal.SIGTERM, signal.SIGHUP]:
            signal.signal(signum, handle_signal)
        # Create/update the pid file, and register a hook to remove it when the
        # process exits
        def remove_pidfile():
            if os.path.exists(pidfile):
                os.remove(pidfile)
        atexit.register(remove_pidfile)
        with open(pidfile, 'w') as fileobj:
            fileobj.write(str(os.getpid()))
def handle_signal(signum, frame):
    """Translate a termination signal into a normal interpreter exit.

    Raising SystemExit (what ``sys.exit()`` does under the hood) lets
    registered atexit hooks — e.g. pid-file removal — run before the
    daemon terminates.
    """
    raise SystemExit()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description :Dump data to file
@Time :2020/08/27 16:01:59
@Author :sam.qi
@Version :1.0
'''
import traceback
from mp.dump.dump import Dump
class FileDump(Dump):
    """Dump worker that appends queued messages to a text file, one per line."""

    def __init__(self, filepath):
        super().__init__()
        self.filepath = filepath

    def work(self):
        """Drain the input queue into the file until a None sentinel
        (or an unexpected error) stops the loop."""
        with open(self.filepath, 'w', encoding="utf-8") as sink:
            while True:
                try:
                    item = self.get_input_queue().get()
                    if item is None:
                        break
                    sink.write(str(item))
                    sink.write("\n")
                except Exception as err:
                    traceback.print_exc()
                    print(err)
                    break
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from mock import patch
from pyqrllib.pyqrllib import sha2_256
from tests.misc.helper import get_alice_xmss, get_bob_xmss
from qrl.core import config
from qrl.core.misc import logger
from qrl.core.Block import Block
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.crypto.misc import merkle_tx_hash
from tests.misc.helper import replacement_getTime
logger.initialize_default()
@patch('qrl.core.misc.ntp.getTime', new=replacement_getTime)
class TestBlock(TestCase):
    """Unit tests for qrl.core.Block: creation, mining blobs, nonces and
    miner-address updates."""

    def __init__(self, *args, **kwargs):
        super(TestBlock, self).__init__(*args, **kwargs)

    @staticmethod
    def _make_block(miner_address):
        # Shared fixture: block at height 5 on top of a dummy parent hash.
        return Block.create(block_number=5,
                            prev_headerhash=bytes(sha2_256(b'test')),
                            prev_timestamp=10,
                            transactions=[],
                            miner_address=miner_address)

    def test_init(self):
        # TODO: Not much going on here..
        self.assertIsNotNone(Block())  # just to avoid warnings

    def test_verify_blob(self):
        block = self._make_block(get_alice_xmss().address)
        self.assertTrue(block.blockheader.verify_blob(block.mining_blob))

    def test_mining_blob(self):
        block = self._make_block(get_alice_xmss().address)
        block.set_nonces(mining_nonce=5, extra_nonce=4)
        blob = block.mining_blob
        self.assertEqual(len(blob), config.dev.mining_blob_size)
        # Both nonces are stored big-endian at fixed offsets in the blob.
        nonce_off = config.dev.mining_nonce_offset
        extra_off = config.dev.extra_nonce_offset
        self.assertEqual(
            int.from_bytes(blob[nonce_off:nonce_off + 4], byteorder='big', signed=False), 5)
        self.assertEqual(
            int.from_bytes(blob[extra_off:extra_off + 8], byteorder='big', signed=False), 4)

    def test_set_mining_nonce_from_blob(self):
        block = self._make_block(get_alice_xmss().address)
        nonce_before = block.mining_nonce
        hash_before = block.headerhash
        blob = block.mining_blob
        # Round-trip: re-reading the nonce from the blob must not change
        # either the nonce or the resulting header hash.
        block.blockheader.set_mining_nonce_from_blob(blob)
        self.assertEqual(block.blockheader.mining_nonce, nonce_before)
        self.assertEqual(block.headerhash, hash_before)
        self.assertEqual(block.blockheader.mining_blob, blob)

    def test_update_mining_address(self):
        bob = get_bob_xmss()
        block = self._make_block(get_alice_xmss().address)
        block.update_mining_address(mining_address=bob.address)
        coinbase_tx = Transaction.from_pbdata(block.transactions[0])
        self.assertTrue(isinstance(coinbase_tx, CoinBase))
        self.assertEqual(coinbase_tx.addr_to, bob.address)
        # The merkle root must be recomputed over the updated coinbase.
        tx_hashes = [tx.transaction_hash for tx in block.transactions]
        self.assertEqual(block.blockheader.tx_merkle_root, merkle_tx_hash(tx_hashes))
|
from django.shortcuts import render
from rest_framework import viewsets
# Create your views here.
from employees.serializers import EmployeeSerializer
from employees.models import Employee
from employees.pagination import MyPagination
class EmployeeViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Employee records.

    Provides list/retrieve/create/update/destroy via DRF's ModelViewSet,
    with custom pagination and name-based search.
    """
    # Full table; DRF slices it per page using MyPagination.
    queryset = Employee.objects.all()
    serializer_class = EmployeeSerializer
    pagination_class = MyPagination
    # NOTE(review): search_fields only takes effect when SearchFilter is an
    # active filter backend (filter_backends or DEFAULT_FILTER_BACKENDS) —
    # confirm project settings.
    search_fields = ('first_name','last_name',)
|
from output.models.sun_data.mgroup.particles.particles00303m.particles00303m1_xsd.particles00303m1 import A
# Public re-export surface of this generated module.
__all__ = [
    "A",
]
|
import os.path
import pandas as pd
from pathlib import Path
# Rewrite a validated NMDC TSV: reorder its columns to the order given by a
# helper file and rename them, writing the result under artifacts/.
old_file = "data/nmdc_test_data_valid.tsv"
extension_val = ".tsv"
suffix_val = "_lowercased"
path_prefix = "artifacts"

# Output path: "<prefix>/<stem><suffix><ext>".
basename = Path(old_file).stem
with_extension = basename + suffix_val + extension_val
with_path = os.path.join(path_prefix, with_extension)

helper_file = "artifacts/for_data_harmonizer_template/exampleInput/soil_emsl_jgi_mg_capitalizastion_helper.tsv"
helper = pd.read_csv(helper_file, sep="\t")
# helper = helper[0:2]

# Helper row 0 holds the source (old) column names, row 1 the replacement
# (new) names, in the desired output order.
old_col_names = helper[0:1].values[0]
new_col_names = helper[1:2].values[0]

# skiprows=1 drops the original header row that the helper re-describes.
old_frame = pd.read_csv(old_file, sep="\t", skiprows=1)

# Select/reorder to the helper's column order, then rename.  .copy() makes
# the frame independent so assigning .columns cannot trigger pandas'
# chained-assignment warning.  (The unused old/new column-count locals from
# the original were removed.)
new_orientation = old_frame[old_col_names].copy()
new_orientation.columns = new_col_names
new_orientation.to_csv(with_path, sep="\t", index=False)
|
# -*- coding:utf-8 -*-
"""
@version: 1.0
@author: kevin
@license: Apache Licence
@contact: liujiezhang@bupt.edu.cn
@site:
@software: PyCharm Community Edition
@file: adios_train.py
@time: 17/05/03 17:39
"""
import json
import os
import time
from math import ceil
import numpy as np
import tensorflow as tf
import yaml
from keras import backend as K
from sklearn import linear_model as lm
from utils.data_helper import build_data_cv
from utils.hiso import HISO
from utils.metrics import (Average_precision, Coverage, Hamming_loss,
One_error, Ranking_loss, Construct_thresholds)
K.set_learning_phase(1)
def do_eval(sess, model, thres_model, eval_data, batch_size):
    '''
    Evaluate the model on a held-out dataset.

    Runs the model over ``eval_data`` in batches, accumulates the mean loss
    and computes multi-label metrics for both label levels (Y0: top labels,
    Y1: bottom labels).

    :param sess: active tf.Session
    :param model: HISO model exposing wds/pos/Y0/Y1 placeholders and
        loss/Y0_probs/Y1_probs tensors
    :param thres_model: fitted threshold regressor; currently unused because
        fixed cut-offs are applied below (kept for interface compatibility)
    :param eval_data: samples with .wds/.pos/.top_label/.bottom_label
    :param batch_size: evaluation batch size
    :return: dict {'eval_loss': float, 'Y0': {metric: value}, 'Y1': {...}}
    '''
    K.set_learning_phase(0)  # inference mode (disables dropout etc.)
    number_of_data = len(eval_data)
    number_of_batch = ceil(number_of_data / batch_size)
    Y0_labels, Y1_labels, Y0_probs, Y1_probs = [], [], [], []
    eval_loss, eval_cnt = 0., 0.
    for batch in range(number_of_batch):
        start = batch_size * batch
        end = start + min(batch_size, number_of_data - start)
        eval_Y0_labels = [hml.top_label for hml in eval_data[start:end]]
        eval_Y1_labels = [hml.bottom_label for hml in eval_data[start:end]]
        curr_loss, eval_Y0_probs, eval_Y1_probs = sess.run(
            [model.loss, model.Y0_probs, model.Y1_probs],
            feed_dict={
                model.wds: [hml.wds for hml in eval_data[start:end]],
                model.pos: [hml.pos for hml in eval_data[start:end]],
                model.Y0: eval_Y0_labels,
                model.Y1: eval_Y1_labels
            })
        eval_loss += curr_loss
        eval_cnt += 1
        Y0_labels.extend(eval_Y0_labels)
        Y1_labels.extend(eval_Y1_labels)
        Y0_probs.extend(eval_Y0_probs)
        Y1_probs.extend(eval_Y1_probs)
    Y0_labels = np.array(Y0_labels)
    Y1_labels = np.array(Y1_labels)
    Y0_probs = np.array(Y0_probs)
    Y1_probs = np.array(Y1_probs)
    # Debug peek at one sample; guarded so small eval sets no longer crash
    # (the original indexed [3] unconditionally).
    if len(Y0_labels) > 3:
        print('\n')
        print('Y0 label:', Y0_labels[3])
        print('Y0 probs:', Y0_probs[3])
        print('Y1 probs:', Y1_probs[3])
        print('Y1 label:', Y1_labels[3])
        print('\n')
    # Binarize probabilities with fixed cut-offs.
    # TODO: switch to thres_model predictions once threshold fitting is
    # validated (see commented-out T1 usage in the original).
    Y0_preds = Y0_probs >= 0.7
    # T1 = thres_model.predict(Y1_probs)
    Y1_preds = Y1_probs >= 0.3
    # max(eval_cnt, 1) guards the degenerate empty-dataset case.
    loss_dict = {'eval_loss': eval_loss / max(eval_cnt, 1.), 'Y0': {}, 'Y1': {}}
    # Explicit dispatch table instead of eval() on metric names: identical
    # metrics, but no dynamic code execution and easier to trace.
    metric_funcs = [
        ('Hamming_loss', Hamming_loss),
        ('One_error', One_error),
        ('Ranking_loss', Ranking_loss),
        ('Coverage', Coverage),
        ('Average_precision', Average_precision),
    ]
    # mode 0: legacy evaluation convention, 1: the corrected convention
    mode = 1
    for name, metric in metric_funcs:
        # Hamming loss is defined on hard predictions; the ranking-style
        # metrics operate on raw probabilities.
        if name == 'Hamming_loss':
            loss_dict['Y0'][name] = metric(Y0_labels, Y0_preds, mode=mode)
            loss_dict['Y1'][name] = metric(Y1_labels, Y1_preds, mode=mode)
        else:
            loss_dict['Y0'][name] = metric(Y0_labels, Y0_probs, mode=mode)
            loss_dict['Y1'][name] = metric(Y1_labels, Y1_probs, mode=mode)
    K.set_learning_phase(1)  # restore training mode for the caller
    return loss_dict
def train(params):
    '''
    Training entry point.

    Loads the corpus, fills data-dependent hyper-parameters, trains the
    HISO model with periodic evaluation, writes TensorBoard summaries and
    checkpoints the model whenever the Y1 Hamming loss improves.

    :param params: model hyper-parameter dict (parsed from params.yaml)
    :return: None
    '''
    datas, voc, pos, max_length = build_data_cv(
        file_path='../docs/data/HML_JD_ALL.new.dat',
        voc_path='../docs/data/voc.json',
        pos_path='../docs/data/pos.json',
        cv=5)
    # fill params: vocabulary sizes and sequence length come from the data
    params['voc_size'] = len(voc)
    params['pos_size'] = len(pos)
    params['words']['dim'] = max_length
    params['pos']['dim'] = max_length
    print(json.dumps(params, indent=4))
    # split test and train: fold 1 is held out, the rest is training data
    test_datas = list(filter(lambda data: data.cv_n == 1, datas))
    train_datas = list(filter(lambda data: data.cv_n != 1, datas))
    print('train dataset: {}'.format(len(train_datas)))
    print('test dataset: {}'.format(len(test_datas)))
    print('max length: {}'.format(max_length))
    # build model: timestamped log dirs keep runs separate in TensorBoard
    timestamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
    log_test_dir = '../docs/test/%s' % timestamp
    log_train_dir = '../docs/train/%s' % timestamp
    os.mkdir(log_test_dir)
    os.mkdir(log_train_dir)
    # log_baseline_dir = '../docs/baseline/'
    # os.mkdir(log_baseline_dir)
    loss_key = [
        'Hamming_loss', 'One_error', 'Ranking_loss', 'Coverage',
        'Average_precision'
    ]
    # Cap the fraction of GPU memory this process may allocate.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.per_process_gpu_memory_fraction = params['gpu_fraction']
    number_of_training_data = len(train_datas)
    batch_size = params['batch_size']
    number_of_batch = int(ceil(number_of_training_data / batch_size))
    # Directory where the best checkpoints are saved.
    model_dir = params['model_dir'] + time.strftime("%Y-%m-%d-%H:%M:%S",
                                                    time.localtime())
    os.mkdir(model_dir)
    model_name = model_dir + '/' + params['model_name']
    with tf.Session(config=config) as sess, tf.device('/gpu:1'):
        hiso = HISO(params)
        saver = tf.train.Saver(max_to_keep=4)
        # baseline_writer = tf.summary.FileWriter(log_baseline_dir)
        test_writer = tf.summary.FileWriter(log_test_dir)
        train_writer = tf.summary.FileWriter(log_train_dir, sess.graph)
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        step = 0
        min_hamming_loss = 10  # sentinel: any real Hamming loss is lower
        best_sess = sess
        # Rolling buffers of train labels/probs used to fit Y1 thresholds.
        rig_labels, rig_probs = [], []
        for epoch in range(params['epoch']):
            # shuffle in each epoch
            train_datas = np.random.permutation(train_datas)
            for batch in range(number_of_batch):
                step += 1
                start = batch_size * batch
                end = start + min(batch_size, number_of_training_data - start)
                # NOTE(review): `pos` below shadows the part-of-speech vocab
                # returned by build_data_cv above; harmless but confusing.
                wds = [hml.wds for hml in train_datas[start:end]]
                pos = [hml.pos for hml in train_datas[start:end]]
                Y0 = [hml.top_label for hml in train_datas[start:end]]
                Y1 = [hml.bottom_label for hml in train_datas[start:end]]
                trn_loss, trn_Y1_probs, _ = sess.run(
                    [hiso.loss, hiso.Y1_probs, hiso.train_op],
                    feed_dict={
                        hiso.wds: wds,
                        hiso.pos: pos,
                        hiso.Y0: Y0,
                        hiso.Y1: Y1
                        # K.learning_phase(): 1
                    })
                rig_labels.extend(Y1)
                rig_probs.extend(trn_Y1_probs)
                timestamp = time.strftime("%Y-%m-%d-%H:%M:%S",
                                          time.localtime())
                str_loss = '{}: epoch: {}, step: {}, train_loss: {}'.format(
                    timestamp, epoch, step, trn_loss)
                print(str_loss)
                # log train loss
                if step % params['log_train_every'] == 0:
                    train_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(
                                tag="loss", simple_value=trn_loss)
                        ]),
                        step)
                # base_value = [tf.Summary.Value(tag='Y1_Hamming_loss', simple_value=0.0809),
                #               tf.Summary.Value(tag='Y1_Average_precision', simple_value=0.4692),
                #               tf.Summary.Value(tag='Y1_Coverage', simple_value=1.8400),
                #               tf.Summary.Value(tag='Y1_One_error', simple_value=0.4852),
                #               tf.Summary.Value(tag='Y1_Hamming_loss', simple_value=0.0809),
                #               tf.Summary.Value(tag='Y1_Ranking_loss', simple_value=0.2865),
                #               ]
                # base_summary = tf.Summary(value=base_value)
                # baseline_writer.add_summary(base_summary, step)
                # log eval data
                if step % params['log_eval_every'] == 0:
                    # fit Y1 thresholds on the buffered training outputs,
                    # then reset the buffers for the next window
                    print('fit thresholds...')
                    T1 = Construct_thresholds(rig_labels, rig_probs)
                    thres_lr = lm.Ridge()
                    thres_lr.fit(rig_probs, T1)
                    print('fit done!')
                    loss_dict = do_eval(sess, hiso, thres_lr, test_datas, batch_size)
                    rig_labels, rig_probs = [], []
                    timestamp = time.strftime("%Y-%m-%d-%H:%M:%S",
                                              time.localtime())
                    str_loss = '{}: epoch: {}, step: {}, eval_loss: {}'.format(
                        timestamp, epoch, step, loss_dict['eval_loss'])
                    print(str_loss)
                    value = [
                        tf.Summary.Value(
                            tag="loss", simple_value=loss_dict['eval_loss'])
                    ]
                    for key in loss_key:
                        value.append(
                            tf.Summary.Value(
                                tag="Y0_%s" % key,
                                simple_value=loss_dict['Y0'][key]))
                        value.append(
                            tf.Summary.Value(
                                tag="Y1_%s" % key,
                                simple_value=loss_dict['Y1'][key]))
                        if key == 'Hamming_loss':
                            print('Y0_{}:\t{}\tY1_{}:\t{}'.format(
                                key, loss_dict['Y0'][key], key, loss_dict['Y1']
                                [key]))
                    summary = tf.Summary(value=value)
                    test_writer.add_summary(summary, step)
                    # judge whether test_acc is greater than before:
                    # checkpoint whenever Y1 Hamming loss hits a new minimum
                    if loss_dict['Y1']['Hamming_loss'] < min_hamming_loss:
                        min_hamming_loss = loss_dict['Y1']['Hamming_loss']
                        best_sess = sess
                        saver.save(
                            best_sess,
                            model_name + '-%s' % min_hamming_loss,
                            global_step=step,
                            write_meta_graph=True)
        # predict and save train data
        test_writer.close()
        train_writer.close()
        # predict(
        #     best_sess,
        #     hiso,
        #     datas,
        #     batch_size,
        #     save_name='data-%s.txt' % timestamp)
def predict(sess, model, dataset, batch_size, save_name='eval.csv'):
    '''
    Predict label probabilities for ``dataset`` and dump them to a file.

    Writes one tab-separated line per sample: Y1 labels, Y1 probs,
    Y0 labels, Y0 probs, raw sentence.

    :param sess: active tf.Session with the model graph loaded
    :param model: object exposing wds/pos placeholders and Y0_probs/Y1_probs
    :param dataset: list of samples (hml objects); top_probs/bottom_probs
        are set on each sample as a side effect
    :param batch_size: prediction batch size
    :param save_name: output file name under ../docs/result/
    '''
    print('start to predict labels.....')
    K.set_learning_phase(0)  # inference mode for Keras layers
    number_of_data = len(dataset)
    number_of_batch = int(ceil(number_of_data / batch_size))
    with open('../docs/result/%s' % save_name, 'w') as f:
        for batch in range(number_of_batch):
            print('current process {} -- {}'.format(number_of_batch, batch))
            start = batch_size * batch
            end = start + min(batch_size, number_of_data - start)
            cur_wds = [hml.wds for hml in dataset[start:end]]
            cur_pos = [hml.pos for hml in dataset[start:end]]
            cur_Y0 = [hml.top_label for hml in dataset[start:end]]
            cur_Y1 = [hml.bottom_label for hml in dataset[start:end]]
            curr_Y0_probs, curr_Y1_probs = sess.run(
                [model.Y0_probs, model.Y1_probs],
                feed_dict={
                    model.wds: cur_wds,
                    model.pos: cur_pos
                    # K.learning_phase(): 1
                })
            # transform [1] -> 'POSITIVE'
            for i in range(start, end):
                # BUG FIX: curr_Y0_probs/curr_Y1_probs/cur_Y0/cur_Y1 are
                # batch-local (length end-start) but were indexed with the
                # global dataset index i, which raises IndexError (or reads
                # the wrong rows) for every batch after the first.
                j = i - start
                dataset[i].top_probs = ' '.join(
                    [str(s) for s in curr_Y0_probs[j]])
                dataset[i].bottom_probs = ' '.join(
                    [str(s) for s in curr_Y1_probs[j]])
                line = '{}\t{}\t{}\t{}\t{}\n'.format(
                    ' '.join([str(x) for x in cur_Y1[j]]),
                    dataset[i].bottom_probs,
                    ' '.join([str(x) for x in cur_Y0[j]]),
                    dataset[i].top_probs,
                    dataset[i].raw_sentence)
                f.write(line)
    K.set_learning_phase(1)
def load_predict(model_meta_path,
                 predict_path,
                 save_name='eval.txt',
                 mode='eval',
                 batch_size=128):
    '''
    Restore the latest checkpoint and run prediction over a data file.

    :param model_meta_path: path to the saved ``.meta`` graph file
    :param predict_path: input data file to predict on
    :param save_name: output file name passed through to ``predict``
    :param mode: unused; kept for interface compatibility
    :param batch_size: prediction batch size
    :return: None
    '''
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        saver = tf.train.import_meta_graph(model_meta_path)
        checkpoint_dir = os.path.dirname(model_meta_path)
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        # Re-bind the input placeholders and output tensors by name.
        graph = tf.get_default_graph()
        # tf_combine_feature = graph.get_tensor_by_name('combine_feature:0')
        model = TFModel(graph.get_tensor_by_name('words:0'),
                        graph.get_tensor_by_name('pos:0'),
                        graph.get_tensor_by_name('Y0_probs:0'),
                        graph.get_tensor_by_name('Y1_probs:0'))
        # Load the data to score, then predict and persist the results.
        # NOTE(review): voc/pos paths differ from train() ('../docs/' vs
        # '../docs/data/') — confirm which layout is current.
        datas, vocab, pos, max_length = build_data_cv(
            file_path=predict_path,
            voc_path='../docs/voc.json',
            pos_path='../docs/pos.json',
            cv=5)
        predict(sess, model, datas, batch_size, save_name=save_name)
class TFModel(object):
    """Thin container bundling restored graph tensors so a reloaded graph
    can be passed to ``predict`` like a live model object."""

    def __init__(self, wds, pos, Y0_probs, Y1_probs):
        self.wds, self.pos = wds, pos
        self.Y0_probs, self.Y1_probs = Y0_probs, Y1_probs
if __name__ == '__main__':
    # Load hyper-parameters and kick off training.
    # Fixes: yaml.load() without a Loader is unsafe (arbitrary object
    # construction) and is an error on modern PyYAML; the file handle was
    # also never closed. safe_load in a with-block addresses both and is
    # behavior-identical for a plain config file.
    with open('./utils/params.yaml', 'r') as cfg:
        params = yaml.safe_load(cfg)
    train(params)
|
"""A (very) simple banning & rate limiting extension for Flask.
"""
from inspect import signature
import time
from collections import deque
from functools import wraps
from flask import request
class IP:
    def __init__(self, ban_count, rate_count):
        """IP record that keeps track of reports and requests made by that IP.

        Entries are unix timestamps; the deques' ``maxlen`` automatically
        discards the oldest entries beyond the configured capacity.

        Args:
            ban_count (int): number of reports to keep
            rate_count (int): number of requests to keep
        """
        self.ban_active = False
        self.ban_entries = deque(maxlen=ban_count)
        self.rate_entries = deque(maxlen=rate_count)

    # Idiom fix: the original assigned lambdas to instance attributes
    # (PEP 8 discourages named lambdas); plain methods keep the exact same
    # call signatures (ip.add_report(), ip.add_entry()).
    def add_report(self):
        """Record a report (ban candidate) at the current time."""
        self.ban_entries.append(time.time())

    def add_entry(self):
        """Record a request at the current time."""
        self.rate_entries.append(time.time())
class GateKeeper:
    def __init__(self, app=None, ban_rule=None, rate_limit_rule=None, ip_header=None, rate_limit_func=None, ban_func=None):
        """GateKeeper instance around a flask app.

        Provides rate-limiting & ban functions.
        Rate limiting is done automatically, but you have to specify when an
        IP should be reported, using `.report()` (usually in your login route
        when the creds are not valid).

        ban_rule should be a list [<ban_count>,<ban_window>,<ban_duration>] where
        - ban_count is the number of reports before actually banning the IP
        - ban_window is the rolling time window to look for ban reports
        - ban_duration is the duration of the ban in seconds.

        rate_limit_rule should be a list [<rate_count>,<rate_window>] where
        - rate_count is the maximum number of requests
        - rate_window is the rolling time window for the rate count.

        As an example, ban_rule=[3,60,600], rate_limit_rule=[100,10] would:
        - ban any IP for 600s if it has been reported 3 times in the last 60s,
        - rate limit any IP if it has made more than 100 requests in the last 10s.

        If you do not set ban_rule, no banning will be done. Same goes for the
        rate limiting. Requests made during the rate limiting period or ban
        period are not counted.

        You can delay the init by omitting `app` here and calling
        `.init_app()` later.

        If you set ip_header but the header is not present in the request, it
        falls back to a "no-ip" string, and any request made by potentially
        different clients will be added to this.

        ban_func & rate_limit_func should be functions with 3 parameters that
        return a string. The parameters are ban_count/rate_limit, time_window,
        retry_in.

        Args:
            app (flask.Flask, optional): Flask app to wrap around. Defaults to None.
            ban_rule (list, optional): Global ban rule for the whole app. Defaults to None.
            rate_limit_rule (list, optional): Global rate limit rule for the whole app. Defaults to None.
            ip_header (str, optional): Header to check for the IP. Useful with a proxy that adds a header with the actual client IP. Defaults to request.remote_addr.
            rate_limit_func (function, optional): Body of the response when rate-limited.
            ban_func (function, optional): Body of the response when banned.
        """
        if ban_rule:
            self.ban_enabled = True
            self.ban_count = ban_rule[0]
            self.ban_window = ban_rule[1]
            self.ban_duration = ban_rule[2]
        else:
            self.ban_enabled = False
        if rate_limit_rule:
            self.rate_limit_enabled = True
            self.rate_count = rate_limit_rule[0]
            self.rate_window = rate_limit_rule[1]
        else:
            self.rate_limit_enabled = False
        # Custom response builders must accept exactly 3 parameters;
        # otherwise fall back to the default messages.
        if callable(rate_limit_func) and len(signature(rate_limit_func).parameters) == 3:
            self._rate_limit_func = rate_limit_func
        else:
            self._rate_limit_func = lambda rate_limit, rate_window, retry_in: "rate-limited for {}s (over {} requests in a {}s window)".format(
                retry_in, rate_limit, rate_window)
        if callable(ban_func) and len(signature(ban_func).parameters) == 3:
            self._ban_func = ban_func
        else:
            self._ban_func = lambda ban_count, ban_window, retry_in: "banned for {}s (reported {} times in a {}s window)".format(
                retry_in, ban_count, ban_window)
        self.ip_header = ip_header
        self.ips = {}  # ip (str) -> IP record
        self.bypass_routes = set()  # endpoint names exempt from checks
        if app:
            self.init_app(app)

    def _get_ip(self) -> str:
        """Returns the IP of the client."""
        if self.ip_header:
            return request.headers.get(self.ip_header) or "no-ip"
        return request.remote_addr

    def _create(self, ip):
        """Add the IP to the tracked dict (no-op if already tracked)."""
        if ip not in self.ips:
            self.ips[ip] = IP(ban_count=self.ban_count if self.ban_enabled else 0,
                              rate_count=self.rate_count if self.rate_limit_enabled else 0)

    def _before_request(self):
        """Function which runs before every request.

        If the client is either banned or rate-limited, we short-circuit the
        response and reply directly with the appropriate message.
        """
        if request.endpoint not in self.bypass_routes:  # avoid routes with the @bypass decorator, or if they use specific limits with override
            ip = self._get_ip()
            self._create(ip)
            if self.ban_enabled and self._is_ip_banned(ip):
                return self._ban_func(self.ban_count, self.ban_window, self._banned_for(ip)), 403
            if self.rate_limit_enabled and self._is_ip_rate_limited(ip):
                return self._rate_limit_func(self.rate_count, self.rate_window, self._rate_limited_for(ip)), 429
            self._add(ip)

    def _add(self, ip):
        """Add a request to this tracked IP."""
        self.ips[ip].add_entry()

    def _banned_for(self, ip) -> int:
        """Returns the time in seconds this IP is banned for."""
        return int((self.ips[ip].ban_entries[-1] + self.ban_duration) - time.time())

    def _rate_limited_for(self, ip) -> int:
        """Returns the time in seconds this IP is rate limited for."""
        rate_entries = [e for e in self.ips[ip].rate_entries if e >= time.time() - self.rate_window]
        return int((rate_entries[0] + self.rate_window) - time.time()) or 0

    def _is_ip_banned(self, ip) -> bool:
        """Returns whether this IP is currently banned or not."""
        # have we too many reports inside the ban window?
        if not self.ips[ip].ban_active and len([e for e in self.ips[ip].ban_entries if e >= time.time() - self.ban_window]) >= self.ban_count:
            self.ips[ip].ban_active = True
        # is the last entry still in our ban duration ?
        if self.ips[ip].ban_active:
            if time.time() <= self.ips[ip].ban_entries[-1] + self.ban_duration:
                return True
            self.ips[ip].ban_active = False
        return False

    def _is_ip_rate_limited(self, ip) -> bool:
        """Returns whether this IP is currently rate limited or not."""
        # in the last rate window, did we have more entries than rate_count?
        if len([e for e in self.ips[ip].rate_entries if e >= time.time() - self.rate_window]) >= self.rate_count:
            return True
        return False

    def init_app(self, app):
        """Add our before-request hook to flask now."""
        app.before_request(self._before_request)

    def report(self, ip=None):
        """Report an IP.

        If no ip arg is provided, uses the ip_header arg provided to the
        GateKeeper instance.
        """
        client_ip = ip or self._get_ip()
        # Robustness fix: an explicitly passed IP may never have gone through
        # _before_request (e.g. a bypassed route), in which case the original
        # code raised KeyError here. Ensure a record exists first.
        self._create(client_ip)
        self.ips[client_ip].add_report()

    def bypass(self, route):
        """Do not apply rate-limiting to this route."""
        @wraps(route)
        def wrapper(*a, **k):
            return route(*a, **k)
        # We store the name of the function associated with the route, not the path of the route
        self.bypass_routes.add(route.__name__)
        return wrapper

    def specific(self, rate_limit_rule, standalone=False, ip_header=None):
        """Route specific gatekeeper. Only for rate limiting purposes.

        By default the specific rate_limit rule is set on top of the global
        instance rule. A use-case could be a global per-minute rule, and a
        per-second bursting rule here.
        If you want to set a unique rate limit rule, set `standalone` to True.
        You can supply a different ip_header, otherwise it will default to the
        instance configuration.
        """
        specific_gk = GateKeeper(rate_limit_rule=rate_limit_rule,
                                 ip_header=ip_header or self.ip_header)

        def decorator(route):
            @wraps(route)
            def wrapper(*args, **kwargs):
                # We reproduce the same behavior as our _before_request func here
                # but for the gk instance tied to this route
                ip = specific_gk._get_ip()
                specific_gk._create(ip)
                if specific_gk.rate_limit_enabled and specific_gk._is_ip_rate_limited(ip):
                    return self._rate_limit_func(specific_gk.rate_count, specific_gk.rate_window, specific_gk._rate_limited_for(ip)), 429
                specific_gk._add(ip)
                return route(*args, **kwargs)
            # remove ourselves from the global instance _before_request
            if standalone:
                self.bypass_routes.add(route.__name__)
            return wrapper
        return decorator
|
'''
png2svg.py - arrange several bitmat images into a single image
==============================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script takes a collection of .png files and outputs an .svg file
arranging the images side by side or in different layers.
Usage
-----
Example::
python png2svg.py --help
Type::
python png2svg.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import math
import tempfile
import random
import CGAT.Experiment as E
import CGAT.SVGdraw as SVGdraw
from types import *
class Image:
    """A bitmap placed on the SVG canvas at a given position and size."""

    def __init__(self, filename, x, y, width, height, opacity=1.0):
        self.mFilename = filename
        self.mX = x
        self.mY = y
        self.mWidth = width
        self.mHeight = height
        self.mOpacity = opacity

    def getSVG(self, xoffset, yoffset):
        """Return an SVGdraw image element shifted by the given offsets."""
        return SVGdraw.image(self.mFilename,
                             xoffset + self.mX,
                             yoffset + self.mY,
                             self.mWidth,
                             self.mHeight,
                             opacity=self.mOpacity)
class PngPlot:
    """Arrange a set of .png files on a single SVG canvas.

    Subclasses implement ``calculateCoordinates`` to populate
    ``mMapImage2Coords`` with ``Image`` placements; ``writeToFile`` then
    renders title, footer and images into one SVG document.

    NOTE(review): this is legacy Python 2 code (``dict.keys().sort()``,
    ``string.join``); those idioms are kept intact.
    """

    def __init__(self, filenames):
        self.mElements = {}  # plane (z-order) -> list of SVG elements
        self.mFilenames = filenames
        # footer
        self.mFooterFrom = 10
        self.mFooterFontSize = 36
        self.mFooterFont = "Verdana"
        self.mFooter = None
        # Title
        self.mTitleFontSize = 36
        self.mTitleFont = "Verdana"
        self.mTitle = None
        # Row/column labels
        self.mLabelFontSize = 36
        self.mLabelFont = "Verdana"
        # default data area size (without terminal node labels)
        self.mDefaultHeight = 1000
        self.mDefaultWidth = 1000
        self.mSeparatorWidth = 10
        self.mSeparatorHeight = 10
        self.mKeepAspectRatio = True
        self.mMapImage2Coords = []

    def setCanvasSize(self, width, height):
        """set canvas width and size."""
        self.mDefaultWidth, self.mDefaultHeight = width, height

    def addElement(self, element, plane=0):
        """add element to list in plane.

        The list is later sorted by priority.
        """
        if plane not in self.mElements:
            self.mElements[plane] = []
        self.mElements[plane].append(element)

    # Header/footer extents; subclasses may override to reserve space.
    def getHeaderHeight(self):
        return 0

    def getHeaderWidth(self):
        return 0

    def getFooterHeight(self):
        return 0

    def getFooterWidth(self):
        return 0

    def addElements(self, elements, plane=0):
        """add multiple elements to a plane."""
        for e in elements:
            self.addElement(e, plane)

    def calculateCanvasSize(self):
        """calculate the size of the canvas (data area plus margins)."""
        self.mPageWidth = self.getHeaderWidth(
        ) + self.mDataWidth + self.getFooterWidth()
        self.mPageHeight = self.getHeaderHeight(
        ) + self.mDataHeight + self.getFooterHeight()

    def initializePlot(self):
        """set various coordinates in the plot.

        Note:
        Width = X = coordinate 1
        Height = Y = coordinate 2
        """
        self.calculateCoordinates()
        self.calculateCanvasSize()

    def setTitle(self, title):
        """set title."""
        self.mTitle = title

    def setFooter(self, footer):
        """set footer."""
        self.mFooter = footer

    def writeImages(self):
        """write images, offset by the header extent."""
        xoffset = self.getHeaderWidth()
        yoffset = self.getHeaderHeight()
        for i in range(len(self.mFilenames)):
            image = self.mMapImage2Coords[i]
            e = image.getSVG(xoffset, yoffset)
            self.addElement(e)

    def writeTitle(self):
        """write title into plot."""
        # NOTE(review): BLACK is not defined in this chunk; presumably it
        # comes from a colour-constants module — confirm the import exists.
        if self.mTitle:
            e = SVGdraw.text(self.mPageWidth / 2,
                             self.mTitleFontSize,
                             self.mTitle,
                             self.mTitleFontSize,
                             self.mTitleFont,
                             stroke="rgb(%i,%i,%i)" % BLACK,
                             text_anchor="middle")
            self.addElement(e)

    def writeFooter(self):
        """write footer.

        The footer contains the legend.

        NOTE(review): this body duplicates writeTitle() — it re-emits the
        title and never uses mFooter/mFooterFontSize. It looks like a
        copy-paste bug, but the intended footer layout is unclear, so the
        behavior is left unchanged and only flagged here.
        """
        if self.mTitle:
            e = SVGdraw.text(self.mPageWidth / 2,
                             self.mTitleFontSize,
                             self.mTitle,
                             self.mTitleFontSize,
                             self.mTitleFont,
                             stroke="rgb(%i,%i,%i)" % BLACK,
                             text_anchor="middle")
            self.addElement(e)

    def finalizePlot(self):
        """build plot."""
        self.writeImages()
        self.writeTitle()
        self.writeFooter()

    def writeToFile(self, outfile):
        """write svg image to file."""
        self.finalizePlot()
        kk = self.mElements.keys()
        kk.sort()
        kk.reverse()
        # make sure the image size is ok: find the bounding box over all
        # coordinate attributes
        min_x, min_y, max_x, max_y = 0, 0, 0, 0
        for k in kk:
            for e in self.mElements[k]:
                for x in ('x', 'x2', 'x1'):
                    if x in e.attributes:
                        v = e.attributes[x]
                        min_x = min(min_x, v)
                        max_x = max(max_x, v)
                for y in ('y', 'y2', 'y1'):
                    if y in e.attributes:
                        v = e.attributes[y]
                        min_y = min(min_y, v)
                        max_y = max(max_y, v)
        min_x, min_y = int(math.floor(min_x)), int(math.floor(min_y))
        max_x, max_y = int(math.floor(max_x)), int(math.floor(max_y))
        # Shift all elements so the bounding box starts at (0, 0).
        for k in kk:
            for e in self.mElements[k]:
                for x in ('x', 'x2', 'x1'):
                    if x in e.attributes:
                        e.attributes[x] -= min_x
                # BUG FIX: the original loop iterated ('y', 'y2', 'y1') with
                # variable x but tested/updated the stale `y` left over from
                # the bounding-box pass, so a single attribute was shifted
                # (three times over) and the others never. Use the loop
                # variable itself.
                for y in ('y', 'y2', 'y1'):
                    if y in e.attributes:
                        e.attributes[y] -= min_y
        # now add all the elements
        self.mRoot = SVGdraw.drawing()
        self.mDraw = SVGdraw.svg(
            (0, 0, self.mPageWidth - min_x, self.mPageHeight - min_y), "100%", "100%")
        for k in kk:
            for e in self.mElements[k]:
                self.mDraw.addElement(e)
        self.mRoot.setSVG(self.mDraw)
        # NOTE(review): tempfile.mktemp is race-prone and deprecated; kept
        # for compatibility with the surrounding Python 2 code base.
        tfile = tempfile.mktemp()
        self.mRoot.toXml(tfile)
        lines = open(tfile, "r").readlines()
        outfile.write(string.join(lines, ""))
        outfile.write("\n")
        os.remove(tfile)
class Grid(PngPlot):
    """grid layout.

    Places images in a roughly square grid scaled to the canvas aspect
    ratio; optionally forces an exact square and drops the excess images.
    """

    def __init__(self, filenames, force_square=False, *args, **kwargs):
        PngPlot.__init__(self, filenames, *args, **kwargs)
        self.mForceSquare = force_square

    def calculateCoordinates(self):
        """calculate coordinates."""
        # square image
        nimages = len(self.mFilenames)
        # Number of columns: sqrt(n) scaled by the canvas aspect ratio.
        nimages_x = int(
            math.sqrt(float(nimages) * (float(self.mDefaultWidth) / self.mDefaultHeight)))
        if self.mForceSquare:
            nimages_y = nimages_x
            # Drop images that do not fit into the forced square grid.
            del self.mFilenames[nimages_y * nimages_x:]
        else:
            nimages_y = int(math.ceil(float(nimages) / nimages_x))
        # Cell sizes: remaining canvas after separators, divided per cell.
        image_width = int(
            float((self.mDefaultWidth - (nimages_x * self.mSeparatorWidth)) / nimages_x))
        # add +1 for image_height, as we use the left/upper corner as the
        # reference coordinate
        image_height = int(
            float((self.mDefaultHeight - ((nimages_y) * self.mSeparatorHeight)) / (nimages_y)))
        if self.mKeepAspectRatio:
            # Square cells: use the smaller dimension for both.
            m = min(image_width, image_height)
            image_width, image_height = m, m
        # Start one row "above" the canvas so the first iteration lands at 0.
        coord_y = -(self.mSeparatorHeight + image_height)
        coord_x = 0
        for x in range(len(self.mFilenames)):
            if x % nimages_x == 0:
                # wrap to a new row
                coord_x = 0
                coord_y += self.mSeparatorHeight + image_height
            self.mMapImage2Coords.append(
                Image(self.mFilenames[x], coord_x, coord_y, image_width, image_height))
            coord_x += self.mSeparatorWidth + image_width
        self.mDataWidth = self.mDefaultWidth
        self.mDataHeight = self.mDefaultHeight
class RandomLayers(PngPlot):
    """Layout placing images at random positions in several opacity layers."""

    def __init__(self, filenames,
                 num_layers,
                 image_width=100,
                 image_height=100,
                 *args, **kwargs):
        """
        :param filenames: images to lay out
        :param num_layers: number of opacity layers to distribute images over
        :param image_width: width of each placed image
        :param image_height: height of each placed image
        """
        PngPlot.__init__(self, filenames, *args, **kwargs)
        self.mNumLayers = num_layers
        self.mImageHeight = image_height
        self.mImageWidth = image_width

    #####################################################################
    def calculateCoordinates(self):
        """Place every image at a random canvas position; opacity increases
        layer by layer so later layers stand out more."""
        nimages = len(self.mFilenames)
        # at least one image per layer: avoids modulo/division by zero when
        # there are fewer images than layers
        images_per_layer = max(1, int(float(nimages) / self.mNumLayers))
        opacity_intervall = 1.0 / self.mNumLayers
        opacity = 0
        for x in range(nimages):
            if x % images_per_layer == 0:
                # next layer: bump opacity
                opacity += opacity_intervall
            image_width = self.mImageWidth
            image_height = self.mImageHeight
            # clamp so randint's upper bound is never negative when an image
            # is larger than the canvas
            coord_x = random.randint(0, max(0, self.mDefaultWidth - image_width))
            coord_y = random.randint(0, max(0, self.mDefaultHeight - image_height))
            self.mMapImage2Coords.append(
                Image(self.mFilenames[x], coord_x, coord_y, image_width, image_height, opacity=opacity))
        self.mDataWidth = self.mDefaultWidth
        self.mDataHeight = self.mDefaultHeight
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """
    if argv is None:
        argv = sys.argv
    parser = E.OptionParser(
        version="%prog version: $Id: png2svg.py 2782 2009-09-10 11:40:29Z andreas $")
    parser.add_option("-i", "--title", dest="title", type="string",
                      help="page title.")
    parser.add_option("-f", "--footer", dest="footer", type="string",
                      help="page footer.")
    parser.add_option("-l", "--layout", dest="layout", type="choice",
                      choices=("grid", "random", "random-layers",),
                      help="layout to choose.")
    parser.add_option("--num-layers", dest="num_layers", type="int",
                      help="number of layers.")
    parser.add_option("--canvas-width", dest="canvas_width", type="int",
                      help="canvas width [default: 1000].")
    parser.add_option("--canvas-height", dest="canvas_height", type="int",
                      help="canvas height [default: 1000].")
    parser.add_option("--image-width", dest="image_width", type="int",
                      help="image width.")
    parser.add_option("--image-height", dest="image_height", type="int",
                      help="image height.")
    parser.add_option("--force-square", dest="force_square", action="store_true",
                      help="force square layout.")
    parser.set_defaults(
        titles="",
        title="",
        footer="",
        layout="grid",
        force_square=False,
        num_layers=2,
        image_width=100,
        image_height=100,
        canvas_width=1000,
        canvas_height=1000,
    )
    (options, args) = E.Start(parser, add_pipe_options=True)
    if len(args) > 0:
        # read filenames from the command line
        filenames = args
    else:
        # read filenames from stdin, stripping the trailing newline.
        # A list comprehension (not map()) so len() works on Python 3.
        filenames = [line[:-1] for line in sys.stdin.readlines()]
    if options.loglevel >= 1:
        options.stdlog.write("# arranging %i images.\n" % len(filenames))
    if options.layout == "grid":
        plot = Grid(filenames, force_square=options.force_square)
    elif options.layout == "random":
        plot = RandomLayers(filenames,
                            num_layers=1,
                            image_width=options.image_width,
                            image_height=options.image_height)
    elif options.layout == "random-layers":
        plot = RandomLayers(filenames,
                            num_layers=options.num_layers,
                            image_width=options.image_width,
                            image_height=options.image_height)
    else:
        # BUGFIX: raising a plain string is a TypeError on modern Python;
        # raise a real exception instead.
        raise ValueError("unknown layout %s" % options.layout)
    plot.setCanvasSize(options.canvas_width, options.canvas_height)
    plot.initializePlot()
    plot.writeToFile(sys.stdout)
    E.Stop()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
"""
Django forms for the JASMIN services app.
"""
__author__ = "Matt Pryor"
__copyright__ = "Copyright 2015 UK Science and Technology Facilities Council"
from django import forms
from django.contrib.contenttypes.models import ContentType
from .models import Metadatum
class MetadataForm(forms.Form):
    """
    Form whose cleaned data can be persisted as metadata on an object.
    """

    def save(self, obj):
        """
        Saves the form's cleaned_data as metadata on the given object.

        .. warning::

            The object must be saved before calling this method.
        """
        ct = ContentType.objects.get_for_model(obj)
        # Replace, rather than merge: drop whatever metadata the object had.
        Metadatum.objects.filter(content_type=ct, object_id=obj.pk).delete()
        for key, value in self.cleaned_data.items():
            Metadatum.objects.create(
                content_type=ct,
                object_id=obj.pk,
                key=key,
                value=value,
            )
|
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-07-31 20:35:41
# @Last Modified by: yulidong
# @Last Modified time: 2018-08-07 21:21:19
import os
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
import time
import cv2
import torch
# Palette of 8 distinct RGB triples for colouring instance labels.
# (Not referenced in this view -- presumably consumed by callers; TODO confirm.)
COLOR=[np.array([255,0,0]),
       np.array([0,255,0]),
       np.array([0,0,255]),
       np.array([125,125,0]),
       np.array([0,125,125]),
       np.array([125,0,125]),
       np.array([50,100,50]),
       np.array([100,50,100])
       ]
def cluster(prediction, bandwidth):
    """Mean-shift cluster a (n_samples, n_features) embedding matrix.

    :param prediction: per-pixel feature vectors, one row per pixel
    :param bandwidth: kernel bandwidth for MeanShift
    :return: (num_clusters, labels, cluster_centers)
    """
    # Pass bandwidth by keyword: recent scikit-learn releases make estimator
    # constructor parameters keyword-only, so the positional form breaks.
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    print('Mean shift clustering, might take some time ...')
    tic = time.time()
    ms.fit(prediction)
    print('time for clustering', time.time() - tic)
    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    num_clusters = cluster_centers.shape[0]
    return num_clusters, labels, cluster_centers
def get_instance_masks(prediction, bandwidth):
    """Convert per-pixel embeddings into one grey-level instance mask per
    batch item (instance k gets grey value 255/num_clusters*k)."""
    batch_size, h, w, feature_dim = prediction.shape
    masks = []
    for b in range(batch_size):
        flat = prediction[b].reshape([h * w, feature_dim])
        num_clusters, labels, _ = cluster(flat, bandwidth)
        print('Number of predicted clusters', num_clusters)
        label_img = np.array(labels, dtype=np.uint8).reshape([h, w])
        out = np.zeros([h, w], dtype=np.uint8)
        # cap at 8 labels, matching the palette size used in this module
        num_clusters = min([num_clusters, 8])
        for mask_id in range(num_clusters):
            out[np.where(label_img == mask_id)] = np.uint8(255 / num_clusters * mask_id)
        masks.append(out)
    return np.array(masks)
def save_instance_masks(prediction, output_dir, bandwidth, count):
    """Cluster each batch item's embeddings and write one black/white PNG per
    predicted instance into *output_dir*.

    :param prediction: array of shape (batch, H, W, feature_dim)
    :param output_dir: destination directory for cluster_XXXX_k.png files
    :param bandwidth: mean-shift bandwidth
    :param count: image index used in the output file names
    :return: list of masks -- NOTE(review): only the *last* cluster's mask of
        each image survives the inner loop; confirm whether that is intended.
    """
    batch_size, h, w, feature_dim = prediction.shape
    instance_masks = []
    for i in range(batch_size):
        num_clusters, labels, cluster_centers = cluster(prediction[i].reshape([h*w, feature_dim]), bandwidth)
        print('Number of predicted clusters', num_clusters)
        labels = np.array(labels, dtype=np.uint8).reshape([h, w])
        # fallback value so the append below is defined even if the inner
        # loop does not run
        mask = np.zeros([h, w, 3], dtype=np.uint8)
        # cap at the 8 labels/colours supported elsewhere in this module
        num_clusters = min([num_clusters, 8])
        for mask_id in range(num_clusters):
            # fresh binary mask per instance: white where the label matches
            mask = np.zeros([h, w, 3], dtype=np.uint8)
            ind = np.where(labels == mask_id)
            mask[ind] = np.array([255, 255, 255])
            output_file_name = os.path.join(output_dir, 'cluster_{}_{}.png'.format(str(count).zfill(4), str(mask_id)))
            cv2.imwrite(output_file_name, mask)
        instance_masks.append(mask)
    return instance_masks
def mean_shift(feature, mean, bandwidth):
    """One mean-shift step with a hard (flat) kernel.

    :param feature: tensor of shape (C, H, W)
    :param mean: current mode estimate, broadcastable against *feature*
        (e.g. shape (C, 1, 1))
    :param bandwidth: radius of the flat kernel in feature space
    :return: updated mean of shape (C,) -- the average feature of all pixels
        whose embedding lies within *bandwidth* of the current mean
    """
    # per-pixel distance to the current mean, shape (H, W)
    dis = torch.norm(feature - mean, dim=0)
    # Flat kernel: 1 inside the bandwidth ball, 0 outside. (dis < bandwidth)
    # keeps the mask on feature's device, unlike the previous hard-coded
    # .cuda() tensors, so this now also runs on CPU.
    mask = (dis < bandwidth).float()
    # average the selected pixels' features per channel
    mean = torch.sum((feature * mask).view(feature.shape[0], feature.shape[1] * feature.shape[2]), dim=1) / torch.sum(mask)
    return mean
def get_mask(feature, mean, bandwidth):
    """Label the pixels of *feature* that lie within *bandwidth* of *mean*.

    Clusters smaller than 400 pixels are rejected. Larger clusters are cut
    into 60x60 tiles over their bounding box and each sufficiently dense tile
    receives its own label, which is then compacted to 1..N.

    NOTE(review): requires CUDA (hard-coded .cuda() tensors). The provisional
    tile label i*j is not unique (tiles (1,2) and (2,1) both get 2), so
    distinct tiles can be merged into one region -- confirm this is intended.

    :param feature: tensor of shape (C, H, W)
    :param mean: mode estimate of shape (C,) (reshaped to (C, 1, 1) below)
    :param bandwidth: flat-kernel radius in feature space
    :return: (mask, areas) -- float mask of region labels (0 = background)
        and the number of labelled regions (0 if the cluster was rejected)
    """
    mean = mean.view([mean.shape[0], 1, 1])
    dis = feature - mean
    # per-pixel distance to the mean, shape (H, W)
    dis = torch.norm(dis, dim=0)
    # 1 where the pixel is inside the bandwidth ball
    mask = torch.where(dis < bandwidth, torch.tensor(1).cuda(), torch.tensor(0).cuda())
    pixels = mask.nonzero()
    if torch.sum(mask) < 400:
        # too small: reject the whole cluster
        return mask.float(), torch.tensor(0).float().cuda()
    # bounding box of the cluster
    minx = torch.min(pixels[:, 0])
    maxx = torch.max(pixels[:, 0])
    miny = torch.min(pixels[:, 1])
    maxy = torch.max(pixels[:, 1])
    # tag dense 60x60 tiles with the provisional (colliding) label i*j
    for i in range(1, torch.ceil((maxx - minx).float() / 60).int() + 1):
        for j in range(1, torch.ceil((maxy - miny).float() / 60).int() + 1):
            if torch.sum(mask[minx + 60 * (i - 1):minx + 60 * i, miny + 60 * (j - 1):miny + 60 * j]) > 400:
                mask[minx + 60 * (i - 1):minx + 60 * i, miny + 60 * (j - 1):miny + 60 * j] *= i * j
    # compact labels to 1..N; negation avoids clashing with values not yet
    # rewritten while iterating
    areas = torch.unique(mask).sort()[0]
    for i in range(1, len(areas)):
        mask = torch.where(mask == areas[i], -torch.tensor(i).cuda(), mask)
    mask = -mask
    areas = len(areas) - 1
    return mask.float(), torch.tensor(areas).float().cuda()
def re_label(mask, area, bandwidth):
    """Offset per-slice region labels so they are globally unique, and build
    a single fused label image.

    :param mask: tensor of shape (K, H, W); slice k holds labels 1..area[k]
        (0 = background)
    :param area: tensor of shape (K,) with the region count of each slice
    :param bandwidth: unused; kept for interface compatibility
    :return: (mask, area, final) -- mask with globally offset labels, the
        unchanged area tensor, and the fused (H, W) label image scaled by 1/255
    """
    index = torch.sum(area)
    print(index)
    # Running label offset. Allocating on mask's device (instead of the
    # previous hard-coded .cuda()) keeps results identical on GPU while also
    # allowing CPU execution.
    count = torch.zeros((), device=mask.device)
    for i in range(area.shape[0]):
        # shift this slice's positive labels past all labels seen so far
        mask[i, :, :] = torch.where(mask[i, :, :] > 0, mask[i, :, :] + count, mask[i, :, :])
        count += area[i]
    # binary coverage per slice; fusing divides by how many slices claim a pixel
    segment = (mask > 0).float()
    final = torch.sum(mask, dim=0) / torch.sum(segment, dim=0)
    final = torch.squeeze(final)
    final = final / 255
    return mask, area, final
def fast_cluster(feature, bandwidth=0.7):
    """Greedy mean-shift segmentation of per-pixel embeddings.

    Repeatedly seeds a random unlabelled pixel, refines the mode with one
    mean-shift step, and claims all pixels within *bandwidth* of it, until
    every pixel carries a label.

    NOTE(review): requires CUDA (this function and its helpers allocate
    .cuda() tensors) and uses an unseeded torch.randint, so the labelling is
    not deterministic across runs -- confirm this is acceptable to callers.

    :param feature: tensor, assumed shape (batch, C, H, W) -- TODO confirm
    :param bandwidth: flat-kernel radius in feature space
    :return: (masks, areas, segments) stacked over the batch
    """
    masks = []
    areas = []
    segments = []
    for i in range(feature.shape[0]):
        n_feature = feature[i, ...]
        # 0 == still unlabelled
        label = torch.zeros(n_feature.shape[1], n_feature.shape[2]).cuda().float()
        n_masks = []
        n_areas = []
        while(torch.min(label) == 0):
            # pick a random still-unlabelled pixel as the next seed
            candidate = torch.where(label == 0, torch.tensor(1).float().cuda(), torch.tensor(0).float().cuda()).nonzero()
            seed = torch.randint(len(candidate), (1,))[0].long()
            mean = n_feature[:, candidate[seed][0].long(), candidate[seed][1].long()].view(n_feature.shape[0], 1, 1)
            # refine the seed's mode, then collect its region mask
            mean = mean_shift(n_feature, mean, bandwidth)
            t_masks, t_areas = get_mask(n_feature, mean, bandwidth)
            label = label + t_masks
            # t_areas == 0 means the cluster was below get_mask's size floor
            if t_areas > 0:
                n_masks.append(t_masks)
                n_areas.append(t_areas)
        mask, count, region = re_label(torch.stack(n_masks), torch.stack(n_areas), bandwidth=0.7)
        masks.append(mask)
        areas.append(count)
        segments.append(region)
    masks = torch.stack(masks)
    areas = torch.stack(areas)
    segments = torch.stack(segments)
    return masks, areas, segments
|
# -*- coding: utf8 -*-
from qrcodeocr.app import api_statistics
from qrcodeocr.test.test_base import TestBase
class TestApiStatistics(TestBase):
    """Tests for the statistics API (fund and portfolio endpoints).

    Fixtures (howfund_id, portfolio_id, date range, mysqlutil) come from
    TestBase. NOTE: a stray '|' token fused to the final line of the previous
    revision (a syntax error) has been removed.
    """

    def test_ApiFundStats(self):
        """
        test fund statistics api
        :return:
        """
        res = api_statistics.get_fund_stats(howfund_id=self.howfund_id,
                                            start_date=self.start_date,
                                            end_date=self.end_date)
        self.assertIsNotNone(res)
        self.assertGreater(len(res), 0)

    def test_ApiPortfolioStats(self):
        """
        test portfolio statistics api
        :return:
        """
        portfolio_df = self.mysqlutil.get_portfolio_df(self.portfolio_id)
        res = api_statistics.get_portfolio_stats(portfolio_df=portfolio_df,
                                                 start_date=self.start_date,
                                                 end_date=self.end_date)
        self.assertIsNotNone(res)
        self.assertGreater(len(res), 0)
from tensorflow.keras.layers import Input, Conv2D, LeakyReLU, Concatenate
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from convhole import ConvHole2D
def get_blindspot_model(input_shape, out_channels, depth=11, kernel_initializer=None, bias_initializer=None):
    """Build a blind-spot denoising network.

    A stack of plain 3x3 convolutions feeds one ConvHole2D (centre-masked)
    convolution per depth level, with the dilation growing with depth; the
    concatenated hole features go through a 1x1-conv head.

    :param input_shape: shape of one input sample (without batch dimension)
    :param out_channels: number of output channels
    :param depth: number of hole-conv levels (backbone has depth-1 convs)
    :param kernel_initializer: forwarded to every conv layer
    :param bias_initializer: forwarded to every conv layer
    :return: an uncompiled keras Model
    """
    inputs = Input(shape=input_shape)
    # Backbone: keep every intermediate activation so each level can feed a
    # differently-dilated hole conv. (The unused receptive-field counters
    # basic_rf/hole_rf from the previous revision were dead code -- removed.)
    basic_convs = [inputs]
    for _ in range(depth - 1):
        res = Conv2D(128, 3, padding="same",
                     kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(basic_convs[-1])
        res = LeakyReLU(alpha=0.1)(res)
        basic_convs.append(res)
    # Blind-spot branch: one hole conv per backbone level, dilation c+1.
    hole_convs = []
    for c in range(depth):
        res = ConvHole2D(18, 3, dilation_rate=c + 1, padding="same",
                         kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(basic_convs[c])
        res = LeakyReLU(alpha=0.1)(res)
        hole_convs.append(res)
    concat_axis = 1 if K.image_data_format() == "channels_first" else -1
    x = Concatenate(axis=concat_axis)(hole_convs)
    # Output stages: 1x1 conv head.
    x = Conv2D(198, 1, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(99, 1, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(99, 1, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(out_channels, 1, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    return Model(inputs=inputs, outputs=x)
|
# Mininet defaults
from mininet.node import OVSSwitch
from mininet.node import UserSwitch
# from mininet.node import OVSLegacyKernelSwitch
# Custom switches
|
from datetime import date, timedelta
from dateutil.parser import parse
def working_days(year, weekend=(5, 6), holidays=tuple()):
    """Yield every working day of *year* as datetime.date, in order.

    :param year: calendar year to iterate
    :param weekend: weekday() numbers that count as weekend (default Sat/Sun)
    :param holidays: iterable of date strings understood by dateutil's parse()
    """
    # Parse the holidays ONCE into a set: the previous revision re-parsed the
    # whole holiday list for every single day of the year.
    holiday_dates = {parse(h).date() for h in holidays}
    year_start, year_end = date(year, 1, 1), date(year, 12, 31)
    total_days = (year_end - year_start).days + 1
    for offset in range(total_days):
        day = year_start + timedelta(offset)
        if day.weekday() not in weekend and day not in holiday_dates:
            yield day
|
import os
import pickle
import numpy as np
from PIL import Image
def unpickle(fp):
    """Load and return the pickled object stored in file *fp*.

    :param fp: path to a pickle file
    :return: the deserialized object
    """
    # encoding='bytes' keeps byte-string dict keys: the CIFAR batches were
    # pickled under Python 2. (The previous 'load_dict = 0' sentinel was
    # pointless -- the value is returned directly.)
    with open(fp, 'rb') as fid:
        return pickle.load(fid, encoding='bytes')
'''
https://github.com/amir-saniyan/AlexNet/blob/master/dataset_helper.py
'''
def get_cifar_10(image_width, image_height, cifar_path='../cifar-10-batches-py'):
    """Load CIFAR-10, resize every image and one-hot encode the labels.

    :param image_width: target width after resizing
    :param image_height: target height after resizing
    :param cifar_path: directory holding the python-version CIFAR-10 batches
    :return: (X_train, Y_train, X_test, Y_test); X arrays are uint8 in
        channels-first-ish order (transposed [0, 3, 2, 1] as before),
        Y arrays are float32 one-hot matrices
    """
    batch_1 = unpickle(os.path.abspath(os.path.join(cifar_path, 'data_batch_1')))
    batch_2 = unpickle(os.path.abspath(os.path.join(cifar_path, 'data_batch_2')))
    batch_3 = unpickle(os.path.abspath(os.path.join(cifar_path, 'data_batch_3')))
    batch_4 = unpickle(os.path.abspath(os.path.join(cifar_path, 'data_batch_4')))
    batch_5 = unpickle(os.path.abspath(os.path.join(cifar_path, 'data_batch_5')))
    test_batch = unpickle(os.path.abspath(os.path.join(cifar_path, 'test_batch')))
    classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    train_samples = len(batch_1[b'labels']) + len(batch_2[b'labels']) + len(batch_3[b'labels'])\
        + len(batch_4[b'labels']) + len(batch_5[b'labels'])
    X_train = np.zeros(shape=[train_samples, image_width, image_height, 3], dtype=np.uint8)
    Y_train = np.zeros(shape=[train_samples, len(classes)], dtype=np.float32)
    batches = [batch_1, batch_2, batch_3, batch_4, batch_5]
    index = 0
    for batch in batches:
        for i in range(len(batch[b'labels'])):
            # stored as 3x32x32 planes; bring to HWC before resizing
            image = batch[b'data'][i].reshape(3, 32, 32).transpose([1, 2, 0])
            label = batch[b'labels'][i]
            X = np.array(Image.fromarray(image).resize((image_width, image_height)))
            # BUGFIX: np.int was removed in NumPy 1.24; the builtin works on
            # every NumPy version and has identical semantics here.
            Y = np.zeros(shape=[len(classes)], dtype=int)
            Y[label] = 1
            X_train[index + i] = X
            Y_train[index + i] = Y
        index += len(batch[b'labels'])
    test_samples = len(test_batch[b'labels'])
    X_test = np.zeros(shape=[test_samples, image_width, image_height, 3], dtype=np.uint8)
    Y_test = np.zeros(shape=[test_samples, len(classes)], dtype=np.float32)
    for i in range(len(test_batch[b'labels'])):
        image = test_batch[b'data'][i].reshape(3, 32, 32).transpose([1, 2, 0])
        label = test_batch[b'labels'][i]
        X = np.array(Image.fromarray(image).resize((image_width, image_height)))
        Y = np.zeros(shape=[len(classes)], dtype=int)
        Y[label] = 1
        X_test[i] = X
        Y_test[i] = Y
    # channel/width/height reordering kept exactly as before
    X_train = np.transpose(X_train, axes=[0, 3, 2, 1])
    X_test = np.transpose(X_test, axes=[0, 3, 2, 1])
    return X_train, Y_train, X_test, Y_test
if __name__ == "__main__":
    # Smoke test: loads CIFAR-10 from the default relative path and prints
    # the resized training-set shape.
    x, _, _, _ = get_cifar_10(70, 70)
    print(x.shape)
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import json
from copy import deepcopy
from bson.objectid import ObjectId
from turbo.util import escape, basestring_type
from util import unittest
class EscapeTest(unittest.TestCase):
    """Exercises turbo.util.escape.to_str conversions."""

    @staticmethod
    def _sample_doc():
        """Return a document containing every value type to_str must handle."""
        return {
            'v1': 10,
            'v2': datetime.datetime.now(),
            'v3': ObjectId(),
            'v4': 'value',
        }

    def test_inc(self):
        pass

    def test_to_str(self):
        doc = self._sample_doc()
        # both a list of documents and a single document must serialize
        as_list = escape.to_str([deepcopy(doc) for _ in range(10)])
        self.assertTrue(isinstance(json.dumps(as_list), basestring_type))
        as_single = escape.to_str(deepcopy(doc))
        self.assertTrue(isinstance(json.dumps(as_single), basestring_type))

    def test_to_str_encode(self):
        doc = self._sample_doc()
        # default encoding: datetimes become floats, ObjectIds strings
        converted = escape.to_str(doc)
        self.assertTrue(isinstance(converted['v1'], int))
        self.assertTrue(isinstance(converted['v2'], float))
        self.assertTrue(isinstance(converted['v3'], basestring_type))
        self.assertTrue(isinstance(converted['v4'], basestring_type))

        # a custom encoder applies to every value
        def encode(value):
            return str(value)

        stringified = escape.to_str(doc, encode)
        self.assertTrue(isinstance(stringified['v1'], basestring_type))
        self.assertTrue(isinstance(stringified['v2'], basestring_type))
        self.assertTrue(isinstance(stringified['v3'], basestring_type))
        self.assertTrue(isinstance(stringified['v4'], basestring_type))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10
@author: jaehyuk
"""
from random import random
import numpy as np
import scipy.stats as ss
import scipy.optimize as sopt
import pyfeng as pf
import scipy.integrate as spint
from . import bsm
'''
MC model class for Beta=1
'''
class ModelBsmMC:
    """
    Plain Monte-Carlo pricer for the SABR model with beta = 1 (lognormal asset).

    Asset and volatility both follow a log-Euler scheme; the option price is
    the sample mean of the terminal payoff (no discounting -- intr/divr are
    stored for the analytic helper only).
    """
    beta = 1.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    bsm_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0):
        """
        :param sigma: initial volatility
        :param vov: vol-of-vol
        :param rho: correlation between asset and vol shocks
        :param beta: kept for interface symmetry; always 1 here
        :param intr: interest rate (analytic BSM helper only)
        :param divr: dividend rate (analytic BSM helper only)
        """
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)

    def bsm_vol(self, strike, spot, texp=None, sigma=None):
        """Implied BSM volatility backed out from the MC price via impvol()."""
        price = self.price(strike, spot, texp, sigma)
        return self.bsm_model.impvol(price=price, strike=strike, spot=spot, texp=texp)

    def price(self, strike, spot, texp=None, sigma=None, cp=1, random=False,
              time_step=0.05, n_path=10000):
        """
        Monte-Carlo price for each strike in *strike*.

        :param strike: iterable of strikes
        :param spot: spot price
        :param texp: time to expiry
        :param sigma: overrides self.sigma when given
        :param cp: +1 call / -1 put
        :param random: if False (default) fix the RNG seeds for reproducibility
        :param time_step: Euler step size (new parameter; the default keeps
            the historical behaviour)
        :param n_path: number of simulated paths (new parameter; the default
            keeps the historical behaviour)
        """
        n_step = int(texp / time_step)
        if not random:
            np.random.seed(12345)
        z = np.random.normal(size=(n_step, n_path))  # vol shocks
        if not random:
            np.random.seed(12346)
        x = np.random.normal(size=(n_step, n_path))
        # correlate the asset shock with the vol shock
        w = self.rho * z + np.sqrt(1 - self.rho**2) * x
        sigma_t = self.sigma if sigma is None else sigma
        s_t = spot
        for t in range(n_step):
            # advance the asset with the *current* sigma, then advance sigma
            s_t = s_t * np.exp(sigma_t * np.sqrt(time_step) * w[t, :]
                               - 0.5 * sigma_t**2 * time_step)
            sigma_t = sigma_t * np.exp(self.vov * np.sqrt(time_step) * z[t, :]
                                       - 0.5 * self.vov**2 * time_step)
        final_price = s_t
        return np.array([np.mean(np.fmax(cp * (final_price - k), 0))
                         for k in strike])
'''
MC model class for Beta=0
'''
class ModelNormalMC:
    """
    Plain Monte-Carlo pricer for the SABR model with beta = 0 (normal asset).

    The asset follows arithmetic (Bachelier) dynamics while its volatility is
    lognormal; prices are sample means of the terminal payoff.
    """
    beta = 0.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    normal_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0):
        """
        :param sigma: initial (normal) volatility
        :param vov: vol-of-vol
        :param rho: correlation between asset and vol shocks
        :param beta: kept for interface symmetry; always 0 here
        :param intr: interest rate (analytic helper only)
        :param divr: dividend rate (analytic helper only)
        """
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)

    def norm_vol(self, strike, spot, texp=None, sigma=None):
        """Implied normal volatility backed out from the MC price."""
        price = self.price(strike, spot, texp, sigma)
        return self.normal_model.impvol(price=price, strike=strike, spot=spot, texp=texp)

    def price(self, strike, spot, texp=None, sigma=None, cp=1, random=False,
              time_step=0.05, n_path=10000):
        """
        Monte-Carlo price for each strike in *strike*.

        :param random: if False (default) fix the RNG seeds for reproducibility
        :param time_step: Euler step size (new parameter; default preserves
            the historical behaviour)
        :param n_path: number of simulated paths (new parameter; default
            preserves the historical behaviour)
        """
        n_step = int(texp / time_step)
        if not random:
            np.random.seed(12345)
        z = np.random.normal(size=(n_step, n_path))  # vol shocks
        if not random:
            np.random.seed(12346)
        x = np.random.normal(size=(n_step, n_path))
        # correlate the asset shock with the vol shock
        w = self.rho * z + np.sqrt(1 - self.rho**2) * x
        sigma_t = self.sigma if sigma is None else sigma
        s_t = spot
        for t in range(n_step):
            # arithmetic (normal) dynamics for the asset, lognormal for vol
            s_t = s_t + sigma_t * w[t, :] * np.sqrt(time_step)
            sigma_t = sigma_t * np.exp(self.vov * np.sqrt(time_step) * z[t, :]
                                       - 0.5 * self.vov**2 * time_step)
        final_price = s_t
        return np.array([np.mean(np.fmax(cp * (final_price - k), 0))
                         for k in strike])
'''
Conditional MC model class for Beta=1
'''
class ModelBsmCondMC:
    """
    Conditional Monte-Carlo pricer for SABR with beta = 1.

    Only the volatility path is simulated; conditional on it the asset is
    lognormal, so each path contributes an analytic BSM price.
    """
    beta = 1.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    bsm_model = None
    # NOTE(review): these two class attributes are shadowed by the local
    # time_step / hard-coded path count inside price() -- reconcile.
    time_step = 0.01
    samples = 10000

    def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0):
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)

    def bsm_vol(self, strike, spot, texp=None):
        """Implied BSM volatility backed out from the conditional-MC price."""
        price = self.price(strike, spot, texp)
        return self.bsm_model.impvol(price=price, strike=strike, spot=spot, texp=texp)

    def price(self, strike, spot, texp=None, cp=1, random=False):
        """
        Conditional MC price for each strike in *strike*.

        :param random: if False (default) fix the RNG seed for reproducibility
        """
        if not random:
            np.random.seed(12345)
        time_step = 0.05
        n_step = int(texp / time_step)
        z = np.random.normal(size=(n_step, 10000))
        sigma_path = np.ones_like(z) * self.sigma
        # NOTE(review): the loop starts at t=1 and stops at n_step-2, so
        # sigma_path[1] is never updated and z[0]/z[-1] go unused -- looks
        # like an off-by-one; preserved as-is to keep results identical.
        for t in range(1, n_step - 1):
            sigma_path[t + 1, :] = sigma_path[t, :] * np.exp(
                self.vov * np.sqrt(time_step) * z[t, :] - 0.5 * self.vov**2 * time_step)
        # BUGFIX: scipy >= 1.14 removed integrate.simps; use simpson when
        # available and fall back to simps on older versions (identical here).
        try:
            from scipy.integrate import simpson
        except ImportError:
            from scipy.integrate import simps as simpson
        int_var = simpson(sigma_path**2, dx=1, axis=0) * time_step
        sigma_final = sigma_path[-1, :]
        # NOTE(review): the usual conditional-MC spot adjustment is
        # exp(rho*(sigma_T - sigma_0)/vov - 0.5*rho**2*int_var); the extra
        # sigma**2 * texp factor below looks suspicious -- confirm before reuse.
        S_0 = spot * np.exp(self.rho * (sigma_final - self.sigma) / self.vov
                            - 0.5 * ((self.rho**2) * (self.sigma**2) * texp * int_var))
        # effective BSM vol of the orthogonal component
        sigma_bs = np.sqrt((1 - self.rho**2) * int_var)
        bsm = pf.Bsm(sigma_bs, intr=0, divr=0)
        return np.array([np.mean(bsm.price(spot=S_0, strike=k, texp=texp, cp=cp))
                         for k in strike])
'''
Conditional MC model class for Beta=0
'''
class ModelNormalCondMC:
    """
    Conditional Monte-Carlo pricer for SABR with beta = 0.

    Only the volatility path is simulated; conditional on it the asset is
    normal, so each path contributes an analytic Bachelier price.
    """
    beta = 0.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    normal_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0):
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)

    def norm_vol(self, strike, spot, texp=None):
        """Implied normal volatility backed out from the conditional-MC price."""
        price = self.price(strike, spot, texp)
        return self.normal_model.impvol(price=price, strike=strike, spot=spot, texp=texp)

    def price(self, strike, spot, texp=None, cp=1, random=False):
        """
        Conditional MC price for each strike in *strike*.

        :param random: if False (default) fix the RNG seed for reproducibility
        """
        if not random:
            np.random.seed(12345)
        time_step = 0.05
        n_step = int(texp / time_step)
        z = np.random.normal(size=(n_step, 10000))
        sigma_path = np.ones_like(z) * self.sigma
        # NOTE(review): same off-by-one as the BSM variant -- sigma_path[1]
        # is never updated and z[0]/z[-1] go unused; preserved as-is.
        for t in range(1, n_step - 1):
            sigma_path[t + 1, :] = sigma_path[t, :] * np.exp(
                self.vov * np.sqrt(time_step) * z[t, :] - 0.5 * self.vov**2 * time_step)
        # BUGFIX: scipy >= 1.14 removed integrate.simps; use simpson when
        # available and fall back to simps on older versions (identical here).
        try:
            from scipy.integrate import simpson
        except ImportError:
            from scipy.integrate import simps as simpson
        int_var = simpson(sigma_path**2, dx=1, axis=0) * time_step
        sigma_final = sigma_path[-1, :]
        # conditional spot and effective normal vol of the orthogonal component
        S_0 = spot + self.rho * (sigma_final - self.sigma) / self.vov
        sigma_n = np.sqrt((1 - self.rho**2) * int_var)
        norm = pf.Norm(sigma_n, intr=0, divr=0)
        return np.array([np.mean(norm.price(spot=S_0, strike=k, texp=texp, cp=cp))
                         for k in strike])
|
# Module-level sentinel: a unique object used to distinguish "no value
# supplied" from legitimate values such as None. (A stray '|' token fused to
# this line in the previous revision -- a syntax error -- has been removed.)
empty = object()
#!/usr/bin/env python
"""
MegaPBTree, non-redundant set of gene annotations for
combining collapsed GFFs from mulitiple smaples.
The original file name: combine_abundance_across_samples.py
"""
import copy
from collections import defaultdict
from pbtranscript.collapsing import IntervalTree, compare_fuzzy_junctions
from pbtranscript.collapsing.cluster import ClusterTree
from pbtranscript.io import (CollapseGffReader, CollapseGffWriter, CollapseGffRecord,
GmapRecord, GroupReader, GroupWriter, GroupRecord,
MegaInfoWriter, MergeGroupOperation)
__author__ = 'etseng@pacificbiosciences.com'
__all__ = ['MegaPBTree']
def read_gff_as_interval_tree(gff_filename):
    """
    Read a collapsed GFF file into per-chromosome, per-strand IntervalTrees.

    :return: dict of chromosome -> {'+': IntervalTree, '-': IntervalTree}
    """
    trees = defaultdict(
        lambda: {"+": IntervalTree(), "-": IntervalTree()})
    for record in CollapseGffReader(gff_filename):
        # index each record by its genomic span on the matching strand tree
        trees[record.chr][record.strand].insert(record.start, record.end, record)
    return trees
class MegaPBTree(object):
    """
    Structure for maintaining a non-redundant set of gene annotations.

    Used to combine collapsed GFFs from different samples: records whose
    junctions match (within max_fuzzy_junction) are merged, everything else
    is kept side by side.
    """

    def __init__(self, gff_filename, group_filename, self_prefix=None, max_fuzzy_junction=0):
        """
        :param gff_filename: collapsed GFF file of the first sample
        :param group_filename: group file of the first sample
        :param self_prefix: prefix for this sample's group members
        :param max_fuzzy_junction: junction wobble (bp) still counted a match
        """
        self.gff_filename = gff_filename
        self.group_filename = group_filename
        self.self_prefix = self_prefix
        self.max_fuzzy_junction = max_fuzzy_junction
        # seqid -> CollapseGffRecord
        self.record_d = dict((r.seqid, r)
                             for r in CollapseGffReader(gff_filename))
        # chr --> strand --> IntervalTree
        self.tree = read_gff_as_interval_tree(gff_filename=self.gff_filename)
        # ex: PB.1.1 --> [ RatHeart|i3_c123.... ]
        self.group_info = MegaPBTree.read_group(
            self.group_filename, self.self_prefix)
        # keep track of gff|group files that have been added.
        self._sample_prefixes = []
        self._group_filenames = []
        self._gff_filenames = []
        self._add_sample_files(
            gff_filename=gff_filename, group_filename=group_filename, sample_prefix="first_sample")

    def __str__(self):
        # BUGFIX: previously reported len(self.gff_filename) -- the number of
        # characters in a path, not the sample count -- and emitted a literal
        # unformatted "\t%s" list element.
        ret = ["MegaPBTree of %s samples" % len(self._sample_prefixes)]
        for sample, gff_fn, group_fn in zip(self._sample_prefixes, self._gff_filenames, self._group_filenames):
            ret.extend(["sample %s" % sample,
                        "\t%s" % gff_fn, "\t%s" % group_fn])
        return '\n'.join(ret)

    @staticmethod
    def read_group(group_filename, group_prefix):
        """read a group file and group_prefix to a dict
        if group_prefix is None: return {group.pbid --> group.members}
        else: {group.pbid --> [group_prefix+'|'+m for m in group.members]
        """
        return {group.name: group.members
                for group in GroupReader(group_filename, group_prefix)}

    def match_record_to_tree(self, r):
        """
        r --- GmapRecord
        tree --- dict of chromosome --> strand --> IntervalTree
        If exact match (every exon junction), return the matching GmapRecord
        Otherwise return None
        *NOTE*: the tree should be non-redundant so can return as soon as exact match is found!
        """
        assert isinstance(r, GmapRecord)
        matches = self.tree[r.chr][r.strand].find(r.start, r.end)
        for r2 in matches:
            # is a match!
            if compare_fuzzy_junctions(r.ref_exons, r2.ref_exons, self.max_fuzzy_junction) == 'exact':
                return r2
        return None

    def add_sample(self, gff_filename, group_filename, sample_prefix, o_gff_fn, o_group_fn, o_mega_fn):
        """Add one more sample to this MegaPBTree object.

        Read gff file to get collapsed isoforms from the new sample, combine
        with the existing collapsed isoforms and write the merged output.
        """
        self._add_sample_files(
            gff_filename=gff_filename, group_filename=group_filename, sample_prefix=sample_prefix)
        # list of (r1 if r2 is None | r2 if r1 is None | longer of r1 or r2 if
        # both not None)
        combined = []
        # BUGFIX for Python 3: dict.keys() is a view with no .remove();
        # track still-unmatched seqids in a mutable set instead.
        unmatched_recs = set(self.record_d.keys())
        for r in CollapseGffReader(gff_filename):
            match_rec = self.match_record_to_tree(r)
            if match_rec is not None:  # found a match! put longer of r1/r2 in
                combined.append((match_rec, r))
                # no-op if already removed (happens for single-exon transcripts)
                unmatched_recs.discard(match_rec.seqid)
            else:  # r is not present in current tree
                combined.append((None, r))
        # put whatever is left from the tree in (sorted => deterministic order)
        for seqid in sorted(unmatched_recs):
            combined.append((self.record_d[seqid], None))
        # create a ClusterTree to re-calc the loci/transcripts
        final_tree = defaultdict(
            lambda: {'+': ClusterTree(0, 0), '-': ClusterTree(0, 0)})
        for i, (r1, r2) in enumerate(combined):
            if r2 is None or (r1 is not None and r1.end - r1.start > r2.end - r2.start):
                final_tree[r1.chr][r1.strand].insert(r1.start, r1.end, i)
            else:
                final_tree[r2.chr][r2.strand].insert(r2.start, r2.end, i)
        self.write_cluster_tree_as_gff(
            final_tree, combined, group_filename, sample_prefix, o_gff_fn, o_group_fn, o_mega_fn)

    def write_cluster_tree_as_gff(self, cluster_tree, rec_list, group_filename2, sample_prefix2, o_gff_fn, o_group_fn, o_mega_fn):
        """
        Write ClusterTree (chr --> dict --> (start, end, rec_list_index)) as collapsedGFF format
        Returns --- a new group_info!!!
        rec_list --- a list of (r1, r2) where r1 and r2 are GmapRecord
        """
        group_info2 = MegaPBTree.read_group(group_filename2, sample_prefix2)
        new_group_info = {}
        gff_writer = open(o_gff_fn, 'w')
        group_writer = GroupWriter(o_group_fn)
        f_mgroup_writer = MegaInfoWriter(o_mega_fn, self.self_prefix, sample_prefix2)
        loci_index = 0
        # BUGFIX for Python 3: dict.keys() has no .sort(); use sorted().
        chroms = sorted(cluster_tree.keys())
        for k in chroms:
            for strand in ('+', '-'):
                for dummy_s, dummy_e, rec_indices in cluster_tree[k][strand].getregions():
                    loci_index += 1
                    isoform_index = 0
                    for i in rec_indices:
                        isoform_index += 1
                        gene_id = "PB.{i}".format(i=loci_index)
                        tID = "{gene_id}.{j}".format(
                            gene_id=gene_id, j=isoform_index)
                        r1, r2 = rec_list[i]
                        assert isinstance(r1, GmapRecord) or r1 is None
                        assert isinstance(r2, GmapRecord) or r2 is None
                        if r1 is None:  # r2 is not None
                            r = r2
                            new_group_info[tID] = group_info2[r2.seqid]
                        elif r2 is None:  # r1 is not None
                            r = r1
                            new_group_info[tID] = self.group_info[r1.seqid]
                        else:  # both present: keep the longer record
                            r = r1 if (r1.end - r1.start >
                                       r2.end - r2.start) else r2
                            new_group_info[tID] = self.group_info[r1.seqid] + \
                                group_info2[r2.seqid]
                        # write merged new group
                        group_writer.writeRecord(GroupRecord(name=tID, members=new_group_info[tID]))
                        # write group merge operation
                        f_mgroup_writer.writeRecord(MergeGroupOperation(pbid=tID, group1=r1, group2=r2))
                        gff_writer.write("{chr}\tPacBio\ttranscript\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{gene_id}\"; transcript_id \"{tID}\";\n".format(
                            chr=k, s=r.start + 1, e=r.end, strand=strand, gene_id=gene_id, tID=tID))
                        for exon in r.ref_exons:
                            gff_writer.write("{chr}\tPacBio\texon\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{gene_id}\"; transcript_id \"{tID}\";\n".format(
                                chr=k, s=exon.start + 1, e=exon.end, strand=strand, gene_id=gene_id, tID=tID))
        gff_writer.close()
        group_writer.close()
        f_mgroup_writer.close()
        return new_group_info

    def _add_sample_files(self, gff_filename, group_filename, sample_prefix):
        """Keep track of gff|group files that have been added."""
        self._gff_filenames.append(gff_filename)
        self._group_filenames.append(group_filename)
        self._sample_prefixes.append(sample_prefix)
|
import torch
from UnarySim.kernel import FSUMul
from UnarySim.stream import RNG, BinGen, BSGen
from UnarySim.metric import ProgError
import matplotlib.pyplot as plt
import time
import math
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test_fsumul_in_stream():
    """Compare FSUMul (unary multiplier with one operand fed as a bit stream)
    against the quantized real-valued product for both unipolar and bipolar
    encodings, tracking progressive error with ProgError over a full
    2**bitwidth-cycle stream.
    """
    bitwidth = 12
    depth = 4
    # hardware configuration shared by all UnarySim components below
    hwcfg = {
        "width": bitwidth,
        "mode": "bipolar",
        "dimr": 1,
        "dima": 0,
        "rng": "sobol",
        "scale": 1,
        "depth": 10,
        "entry": None,
        "static": False
    }
    # software dtypes for RNG / stream / buffer tensors
    swcfg = {
        "rtype": torch.float,
        "stype": torch.float,
        "btype": torch.float
    }
    col = 100
    modes = ["bipolar", "unipolar"]
    for mode in modes:
        # random inputs quantized to 2**-bitwidth resolution; bipolar values
        # lie in [-1, 1), unipolar in [0, 1)
        if mode == "unipolar":
            input_prob_0 = torch.rand(col).mul(2**bitwidth).round().div(2**bitwidth).to(device)
            input_prob_1 = torch.rand(col).mul(2**bitwidth).round().div(2**bitwidth).to(device)
        elif mode == "bipolar":
            input_prob_0 = torch.rand(col).mul(2).sub(1).mul(2**bitwidth).round().div(2**bitwidth).to(device)
            input_prob_1 = torch.rand(col).mul(2).sub(1).mul(2**bitwidth).round().div(2**bitwidth).to(device)
        hwcfg["mode"] = mode
        # the multiplier itself runs at the narrower 'depth' width
        hwcfg["width"] = depth
        dut_mul = FSUMul(None, hwcfg, swcfg).to(device)
        hwcfg["width"] = bitwidth
        # quantized reference product
        oVec = torch.mul(input_prob_0, input_prob_1).mul(2**bitwidth).round().div(2**bitwidth).to(device)
        # progressive-error monitors for both inputs and the output
        prob_0_PE = ProgError(input_prob_0, hwcfg).to(device)
        prob_1_PE = ProgError(input_prob_1, hwcfg).to(device)
        oVecPE = ProgError(oVec, hwcfg).to(device)
        # binary sources + RNGs feeding the bit-stream generators
        prob_0_Source = BinGen(input_prob_0, hwcfg, swcfg)().to(device)
        prob_1_Source = BinGen(input_prob_1, hwcfg, swcfg)().to(device)
        iVecRNG0 = RNG(hwcfg, swcfg)().to(device)
        iVecRNG1 = RNG(hwcfg, swcfg)().to(device)
        prob_0_BS = BSGen(prob_0_Source, iVecRNG0, swcfg).to(device)
        prob_1_BS = BSGen(prob_1_Source, iVecRNG1, swcfg).to(device)
        with torch.no_grad():
            start_time = time.time()
            idx = torch.zeros(input_prob_0.size()).type(torch.long).to(device)
            # run one full unary stream of 2**bitwidth cycles
            for i in range(2**bitwidth):
                iBS_0 = prob_0_BS(idx + i)
                iBS_1 = prob_1_BS(idx + i)
                prob_0_PE.Monitor(iBS_0)
                prob_1_PE.Monitor(iBS_1)
                oVecU = dut_mul(iBS_0, iBS_1)
                oVecPE.Monitor(oVecU)
            print("--- %s seconds ---" % (time.time() - start_time))
            print(mode)
            # ProgError() returns (value, error); report the error statistics
            print("input 0 error: ", "min:", torch.min(prob_0_PE()[1]), "max:", torch.max(prob_0_PE()[1]))
            print("input 1 error: ", "min:", torch.min(prob_1_PE()[1]), "max:", torch.max(prob_1_PE()[1]))
            print("output error: ", "min:", torch.min(oVecPE()[1]), "max:", torch.max(oVecPE()[1]), "rmse:", torch.sqrt(torch.mean(torch.mul(oVecPE()[1], oVecPE()[1]))), "bias:", torch.mean(oVecPE()[1]))
            # result_pe = oVecPE()[1].cpu().numpy().flatten()
            # fig = plt.hist(result_pe, bins='auto')  # arguments are passed to np.histogram
            # plt.title("Histogram for final output error")
            # plt.show()


if __name__ == '__main__':
    test_fsumul_in_stream()
|
# -*- coding: UTF-8 -*-
import sys
import textwrap
import click
import tweepy
from blabbr.config import Config
from blabbr.model import ModelBuilder, TwitterDigger
from blabbr.twitter import TwitterClient
from blabbr.bot import Bot
class Cli:
    """Command-line facade used by the click commands below.

    Wraps configuration handling, first-run setup, model population and
    the bot runtime behind plain methods so the click wrappers stay thin.
    """

    def __init__(self, cfg=None, model=None, **kw):
        # cfg: optional path to the config file; model: path to the model file.
        self.cfg = Config.from_path(cfg)
        self.model_path = model
        self.model_builder = None

    def print_text(self, text, width=80, **kw):
        """Dedent *text* and echo it wrapped to *width* columns.

        Paragraphs (separated by blank lines) are wrapped independently.
        Extra keyword arguments are forwarded to click.echo.
        """
        text = textwrap.dedent(text).strip()
        paragraphs = [
            textwrap.fill(p, width=width)
            for p in text.split("\n\n")]
        click.echo("\n\n".join(paragraphs), **kw)

    def setup(self, force=False, check=False, **kw):
        """Run the first-time setup (NLTK data + Twitter auth).

        With check=True only verify the stored credentials.  Exits with
        status 1 when nothing had to be done and --force wasn't given.
        """
        if check:
            return self.setup_check()

        # BUGFIX: this used to start at True and AND (&=) the two results,
        # so performing only ONE of the two steps still reported
        # "already setup" and exited with an error after doing real work.
        # Any step actually performed counts as a setup.
        did_setup = False
        did_setup |= self.setup_nltk(force=force, **kw)
        did_setup |= self.setup_auth(force=force, **kw)

        if not did_setup and not force:
            click.echo("The bot is already setup! Use --force if you're sure.",
                       err=True)
            sys.exit(1)

    def setup_auth(self, noninteractive=False, force=False, **kw):
        """Collect and store the Twitter API credentials.

        Returns False when credentials are already configured (and force
        is not set), True once a new set of credentials has been saved.
        Exits with status 1 in noninteractive mode if anything is missing.
        """
        if not force and len(self.cfg.get_auth()) >= 4:
            return False

        auth = {k: kw.get(k) for k in
                ("consumer_key", "consumer_secret", "token", "token_secret")}

        missing_infos = set(k for k, v in auth.items() if v is None)
        if missing_infos:
            if noninteractive:
                click.echo("Missing auth info: %s" % ", ".join(missing_infos),
                           err=True)
                sys.exit(1)

            if missing_infos & {"consumer_key", "consumer_secret"}:
                self.print_text("""
                You need to create a Twitter app in order to access the
                Twitter API. Go on the following page:
                https://apps.twitter.com/app/new
                You don't have to fill the callback URL. Also, the account
                with which you create your app has little importance as
                long as you have a valid mobile phone number attached to
                it.
                Once you're done, go on the "Keys and Access Tokens" tab
                and copy the consumer key & secret here.
                """)
                click.echo()
                auth["consumer_key"] = input("Consumer key: ").strip()
                auth["consumer_secret"] = input("Consumer secret: ").strip()
                click.echo()

            if missing_infos & {"token", "token_secret"}:
                oauth = tweepy.OAuthHandler(auth["consumer_key"],
                                            auth["consumer_secret"])
                redirect_url = oauth.get_authorization_url()
                self.print_text("""
                You need to create an account for your bot then make it
                allow your app to use its account.
                Open this URL in a browser in which you're logged with your
                bot's account. It'll give you a verification code you'll
                copy back here.
                """)
                click.echo("\n %s\n" % redirect_url)
                code = input("Verification code: ")
                oauth.get_access_token(code)
                auth["token"] = oauth.access_token
                auth["token_secret"] = oauth.access_token_secret
                click.echo()

        self.cfg.set_auth(auth)
        # BUGFIX: the credential verification used to be duplicated inline
        # here *and* run again through setup_check(), printing the whole
        # report twice.  setup_check() alone does everything needed (it
        # also records the bot id, which the inline copy did not).
        self.setup_check()
        self.cfg.save()
        return True

    def setup_nltk(self, **kw):
        """Download the NLTK POS-tagger data when missing.

        Returns True when a download was performed, False when the data
        was already available locally.
        """
        import nltk
        from nltk.data import find

        tagger = "averaged_perceptron_tagger"
        try:
            find("taggers/%s" % tagger)
        except LookupError:
            # Typo fixed in the user-facing message: "NTLK" -> "NLTK".
            click.echo("Downloading NLTK data (~2MB)...")
            nltk.download(tagger)
            return True
        return False

    def setup_check(self):
        """Verify the stored credentials and cache the bot's identity."""
        client = TwitterClient(self.cfg)
        valid_creds = client.verify_credentials()
        if valid_creds:
            click.echo("Your bot's authentication is set up!")
            click.echo("Screen name: %s" % valid_creds.screen_name)
            click.echo("Name: %s" % valid_creds.name)

            # Save the bot identification infos in the config for later use.
            # This saves us an API call when we need them; e.g. when filtering
            # the home timeline since it may contains some of the bot's tweets.
            self.cfg.set("bot", "screen_name", valid_creds.screen_name)
            self.cfg.set("bot", "id", valid_creds.id_str)
        else:
            click.echo(
                "Your authentication credentials seem invalid. Please check"
                " them again.")

    def config(self, name=None, value=None):
        """git-config-style accessor: print all, print one, or set one."""
        if not name:
            click.echo(self.cfg.git_like_representation())
            return

        if name == "init":
            # Pseudo-variable: just write the config file to disk.
            self.cfg.save()
            return

        if "." not in name:
            click.echo("Config variable must be of the form <section>.<name>",
                       err=True)
            sys.exit(1)

        section, name = name.split(".", 1)

        if value:
            self.cfg.set(section, name, value)
            self.cfg.save()
            return

        # Sentinel object: distinguishes "unset" from a stored falsy value.
        _nil = object()
        value = self.cfg.get(section, name, fallback=_nil)
        if value is not _nil:
            click.echo(value)

    def _load_model(self):
        """Create the ModelBuilder for self.model_path and keep a handle."""
        self.model_builder = ModelBuilder(self.model_path)
        return self.model_builder

    def _model(self):
        """Return the underlying Markov model (may be None when empty)."""
        return self.model_builder.model()

    def populate(self, raw=None, from_raw=None, pick_friends=10,
                 timeline_size=1000, chunk_size=2000):
        """Collect tweets and feed them to the model.

        raw: append collected tweets to this file instead of the model.
        from_raw: read tweets from this file instead of the Twitter API.
        """
        digger = TwitterDigger(self.cfg)

        tweets = []
        if from_raw:
            with open(from_raw) as f:
                for line in f:
                    tweets.append(line.rstrip())
        else:
            tweets = digger.tweets(pick_friends=pick_friends,
                                   timeline_size=timeline_size)

        if raw:
            # Dump mode: no model involved; Ctrl-C stops the dump cleanly.
            with open(raw, "a") as f:
                try:
                    for tweet in tweets:
                        f.write("%s\n" % tweet.replace("\n", " "))
                except KeyboardInterrupt:
                    pass
            return

        return self._populate(tweets, chunk_size)

    def _populate(self, tweets, chunk_size=2000):
        """Feed *tweets* into the model in chunks of *chunk_size*."""
        with self._load_model() as mb:
            corpus = []
            try:
                for tweet in tweets:
                    corpus.append(tweet)
                    if len(corpus) == chunk_size:
                        click.echo("Feeding %d tweets..." % chunk_size)
                        mb.feed_corpus("\n".join(corpus))
                        corpus = []
            except KeyboardInterrupt:
                pass

            # Flush the last, possibly partial, chunk.
            if corpus:
                click.echo("Feeding %d tweets..." % len(corpus))
                mb.feed_corpus("\n".join(corpus))

    def run(self, dry_run=False, debug=False):
        """Load the model and start the bot's main loop."""
        self._load_model()
        model = self._model()
        if model is None:
            raise RuntimeError("The bot cannot run with an empty model")
        Bot(cfg=self.cfg, model=model, dry_run=dry_run, debug=debug).live()
# Root click group: every subcommand receives the shared Cli facade
# through the click context object (ctx.obj).
@click.group()
@click.option('--cfg', type=click.Path(), help="Path to the config")
@click.option("--model", type=click.Path(),
              default="blabbr.json",  # default to the current directory
              help="Path to the saved model")
@click.pass_context
def cli(ctx, **kw):
    ctx.obj = Cli(**kw)
# `blabbr setup`: one-time credential + NLTK data provisioning.
@cli.command()
@click.option("--consumer-key", metavar="KEY", help="Consumer key (API key)")
@click.option("--consumer-secret", metavar="SECRET",
              help="Consumer secret (API secret)")
@click.option("--token", metavar="TOKEN", help="Access token")
@click.option("--token-secret", metavar="SECRET", help="Access token secret")
@click.option("--noninteractive", is_flag=True,
              help=("Fail if one option is missing instead of offering an"
                    " interactive setup."))
@click.option("--force", is_flag=True,
              help="Force the setup even if the bot is already set.")
@click.option("--check", is_flag=True,
              help="Check the authentication setup.")
@click.pass_obj
def setup(cli, *args, **kw):
    """Setup the bot's config"""
    # `cli` here is the Cli instance injected by @click.pass_obj, not the
    # `cli` group function above (it shadows it locally).
    cli.setup(*args, **kw)
# `blabbr populate`: harvest tweets and grow the Markov model.
@cli.command()
@click.option("--raw", type=click.Path(),
              help=("Dump tweets in a file instead of feeding a model."
                    " Useful for debugging."))
@click.option("--from-raw", type=click.Path(),
              help=("Read tweets from a file instead of the Twitter API."))
@click.option("--pick-friends", type=int,
              default=10,
              help=("Number of friends to check when populating from Twitter"
                    " (default: 10)."))
@click.option("--timeline-size", type=int,
              default=1000,
              help="Number of tweets to retrieve per user (default: 1000)")
@click.option("--chunk-size", type=int,
              default=2000,
              help="Number of tweets to insert at a time in the model.")
@click.pass_obj
def populate(cli, *args, **kw):
    """Populate the Markov model"""
    # `cli` is the Cli instance injected by @click.pass_obj.
    cli.populate(*args, **kw)
# `blabbr run`: start the bot's live loop.
@cli.command()
@click.option("--dry-run", "-n", is_flag=True,
              help="Don't tweet anything (useful for debugging).")
@click.option("--debug", is_flag=True,
              help="Debug mode. Implies --dry-run.")
@click.pass_obj
def run(cli, *args, **kw):
    """Run the bot"""
    # `cli` is the Cli instance injected by @click.pass_obj.
    cli.run(*args, **kw)
# `blabbr config`: git-config-like get/set interface.
@cli.command()
@click.argument("name", required=False)
@click.argument("value", required=False)
@click.pass_obj
def config(cli, *args, **kw):
    """
    Manage the bot's configuration. This command works like `git config` and
    prints the whole configuration when called without argument. Passing a name
    prints only this variable, and passing both a name and a value sets the
    variable.
    """
    # `cli` is the Cli instance injected by @click.pass_obj.
    cli.config(*args, **kw)
# Allow running the CLI module directly (also exposed as an entry point).
if __name__ == "__main__":
    cli()
|
import collections
import heapq
import itertools
from heapq import heappush, heappop
from typing import List
class Solution:
    """LC 373, k pairs with smallest sums -- lazy best-first search.

    nums1 and nums2 are assumed sorted ascending (problem guarantee).
    Only the two successors of a popped index pair are pushed, so the
    heap holds O(k) entries: O(k log k) total work.
    """

    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        if not nums1 or not nums2:
            return []
        heap = [(nums1[0] + nums2[0], 0, 0)]
        # BUGFIX: `set((0, 0))` builds {0} (a set of the tuple's *elements*),
        # not a set containing the pair.  It only happened to work because
        # (0, 0) can never be re-generated; seed the set correctly.
        seen = {(0, 0)}
        result = []
        while heap and k > 0:
            _, i, j = heappop(heap)
            result.append([nums1[i], nums2[j]])
            k -= 1
            if i + 1 < len(nums1) and (i + 1, j) not in seen:
                heappush(heap, (nums1[i + 1] + nums2[j], i + 1, j))
                seen.add((i + 1, j))
            if j + 1 < len(nums2) and (i, j + 1) not in seen:
                heappush(heap, (nums1[i] + nums2[j + 1], i, j + 1))
                seen.add((i, j + 1))
        return result
# map + heap
class Solution:
    """Value-deduplicating variant: best-first search over *unique* values.

    Duplicates are counted first so a popped unique pair (d1, d2) emits
    c1*c2 result pairs at once (capped at the remaining k).
    """

    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        if not k or not nums1 or not nums2:
            return []
        # Count occurrences of each distinct value.
        dic1, dic2 = collections.defaultdict(int), collections.defaultdict(int)
        for n1 in nums1:
            dic1[n1] += 1
        for n2 in nums2:
            dic2[n2] += 1
        res = []
        # Sorted (value, count) lists; indices below refer to these.
        l1, l2 = sorted(dic1.items()), sorted(dic2.items())
        q = [[l1[0][0] + l2[0][0], 0, 0]]
        # CLEANUP: a redundant `i = 0; j = 0` before this point was removed;
        # both variables are (re)bound on every heap pop.
        i, j = 0, 0
        visited = set([(0, 0)])
        while k > 0 and q:
            top = heapq.heappop(q)
            _, i, j = top
            (d1, c1), (d2, c2) = l1[i], l2[j]
            # Emit every combination of the duplicated values, capped at k.
            res.extend([[d1, d2]] * min(c1 * c2, k))
            k -= c1 * c2
            if k > 0:
                if i < len(l1) - 1 and (i + 1, j) not in visited:
                    heapq.heappush(q, [l1[i + 1][0] + d2, i + 1, j])
                    visited |= {(i + 1, j)}
                if j < len(l2) - 1 and (i, j + 1) not in visited:
                    heapq.heappush(q, [d1 + l2[j + 1][0], i, j + 1])
                    visited |= {(i, j + 1)}
        return res
class Solution:
    """Lazy frontier walk: each row enters the heap exactly once."""

    def kSmallestPairs(self, nums1, nums2, k):
        frontier = []

        def offer(a, b):
            # Push the candidate pair only when both indices are in range.
            if a < len(nums1) and b < len(nums2):
                heapq.heappush(frontier, [nums1[a] + nums2[b], a, b])

        offer(0, 0)
        smallest = []
        while frontier and len(smallest) < k:
            _, a, b = heapq.heappop(frontier)
            smallest.append([nums1[a], nums2[b]])
            offer(a, b + 1)
            # Only the first column advances to the next row, so each row
            # is introduced into the frontier a single time.
            if b == 0:
                offer(a + 1, 0)
        return smallest
#Brute Force: O(mn) extra memory and O(mn log(mn)) time (full product is sorted).
class Solution:
    """Brute force: sort the full cartesian product, keep the first k."""

    def kSmallestPairs(self, nums1, nums2, k):
        # BUGFIX: under Python 3 `map` returns a lazy iterator, not the
        # list callers/judges expect; materialize the result.
        return [list(pair) for pair in
                sorted(itertools.product(nums1, nums2), key=sum)[:k]]
#Brute Force
class Solution:
    """Brute force via a bounded heap: O(mn log k) time, O(k) extra memory."""

    def kSmallestPairs(self, nums1, nums2, k):
        # BUGFIX: under Python 3 `map` returns a lazy iterator, not the
        # list callers/judges expect; materialize the result.
        return [list(pair) for pair in
                heapq.nsmallest(k, itertools.product(nums1, nums2), key=sum)]
#or
class Solution:
    """Bounded-heap brute force built from a generator of [u, v] lists."""

    def kSmallestPairs(self, nums1, nums2, k):
        candidates = ([a, b] for a in nums1 for b in nums2)
        return heapq.nsmallest(k, candidates, key=sum)
class Solution:
    """k-way merge: one pre-sorted lazy stream of pairs per nums1 element."""

    def kSmallestPairs(self, nums1, nums2, k):
        def row_stream(u):
            # Pairs (u, v) for a fixed u -- already sorted because nums2 is.
            # A function call binds u per stream (avoids the late-binding
            # trap a nested generator expression would have).
            return ([u + v, u, v] for v in nums2)

        merged = heapq.merge(*(row_stream(u) for u in nums1))
        return [entry[1:] for entry in itertools.islice(merged, k)]
|
from ..code.web.scraper.scrap.walmart import get_url_walmart, scrap_walmart
from . import setup_get_driver_details
def test_get_url_walmart_1():
    # Spaces in the item name must be encoded as '+' in the search URL.
    expected = "https://www.walmart.com/search?q=SAMSUNG+Galaxy+Tab+A7+32GB"
    assert get_url_walmart("SAMSUNG Galaxy Tab A7 32GB") == expected
def test_get_url_walmart_2():
    # A longer multi-word query gets the same '+' encoding treatment.
    item_name = "Brita Longlast Replacement Filters Dispensers"
    expected = "https://www.walmart.com/search?q=Brita+Longlast+Replacement+Filters+Dispensers"
    assert get_url_walmart(item_name) == expected
def test_scrap_walmart():
    # Smoke test: a live scrape for a common grocery item returns something.
    driver_details = setup_get_driver_details()
    results = scrap_walmart(driver_details, "Fresh Strawberries, 1 lb")
    assert results is not None
|
#!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import sys
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
    """Map a database NULL (None) to the 'M' missing-value marker."""
    return 'M' if val is None else val
def fetcher(station, sts, ets):
    """Stream RAOB profile rows for *station* in [sts, ets) as CSV via ssw."""
    conn = get_dbconn('postgis')
    cur = conn.cursor('raobstreamer')
    stations = [station, ]
    if station.startswith("_"):
        # Virtual site: the station list is encoded in the network table's
        # name field after the "--" separator.
        table = NetworkTable("RAOB")
        stations = table.sts[station]['name'].split("--")[1].strip().split(",")
    cur.execute("""
    SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
    p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
    p.bearing, p.range_miles, f.station from
    raob_profile p JOIN raob_flights f on
    (f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
    """, (tuple(stations), sts, ets))
    ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
         "dwpc,drct,speed_kts,bearing,range_sm\n"))
    for row in cur:
        # Station id first, then the ten profile columns with None -> 'M'.
        fields = [row[10]] + [m(row[col]) for col in range(10)]
        ssw("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % tuple(fields))
def friendly_date(form, key):
    """Parse CGI field *key* as a UTC datetime, forgiving a missing time.

    Accepts "%m/%d/%Y" or "%m/%d/%Y %H:%M".  On any parse failure (including
    a missing field, where getfirst returns None) an error page is emitted
    and the script exits -- the broad except is intentional for CGI use.

    Returns a timezone-aware datetime in UTC.
    """
    val = form.getfirst(key)
    try:
        val = val.strip()
        if len(val.split()) == 1:
            dt = datetime.datetime.strptime(val, '%m/%d/%Y')
        else:
            dt = datetime.datetime.strptime(val, '%m/%d/%Y %H:%M')
        # Stdlib timezone.utc replaces the previous pytz.UTC -- equivalent
        # for plain UTC and removes the third-party dependency here.
        dt = dt.replace(tzinfo=datetime.timezone.utc)
    except Exception:
        ssw('Content-type: text/plain\n\n')
        ssw(('Invalid %s date provided, should be "%%m/%%d/%%Y %%H:%%M"'
             ' in UTC timezone'
             ) % (key, ))
        sys.exit()
    return dt
def main():
    """CGI entry point: parse parameters, emit headers, stream the data."""
    form = cgi.FieldStorage()
    begin = friendly_date(form, 'sts')
    end = friendly_date(form, 'ets')
    station = form.getfirst('station', 'KOAX')[:4]
    wants_download = form.getfirst('dl', None) is not None
    if wants_download:
        # Attachment download: name the file after station and time range.
        ssw('Content-type: application/octet-stream\n')
        ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
             ) % (station, begin.strftime("%Y%m%d%H"),
                  end.strftime("%Y%m%d%H")))
    else:
        ssw('Content-type: text/plain\n\n')
    fetcher(station, begin, end)
# CGI script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Copyright (C) 2006-2008 Async Open Source
# Henrique Romano <henrique@async.com.br>
# Johan Dahlin <jdahlin@async.com.br>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# TODO:
# Toolbars
"""Usage: gtk-builder-convert [OPTION] [INPUT] [OUTPUT]
Converts Glade files into XML files which can be loaded with GtkBuilder.
The [INPUT] file is the Glade XML file to convert.
-w, --skip-windows Convert everything but GtkWindow subclasses.
-r, --root Convert only widget named root and its children
-h, --help display this help and exit
When OUTPUT is -, write to standard output.
Examples:
gtk-builder-convert preference.glade preferences.ui
Report bugs to http://bugzilla.gnome.org/."""
import getopt
import os
import sys
from xml.dom import minidom, Node
# Toplevel widget classes; these are dropped (keeping their child) when
# --skip-windows is in effect.
WINDOWS = ['GtkWindow',
           'GtkDialog',
           'GtkFileChooserDialog',
           'GtkMessageDialog']

# The subprocess is only available in Python 2.4+
try:
    import subprocess
    subprocess  # pyflakes
except ImportError:
    # No subprocess means _indent() cannot shell out to xmllint.
    subprocess = None
def get_child_nodes(node):
    """Return the direct <child> element nodes of an <object> node."""
    assert node.tagName == 'object'
    return [child for child in node.childNodes
            if child.nodeType == Node.ELEMENT_NODE
            and child.tagName == 'child']
def get_properties(node):
    """Map property name -> text value for an <object> node's properties."""
    assert node.tagName == 'object'
    properties = {}
    for child in node.childNodes:
        if child.nodeType != Node.ELEMENT_NODE or child.tagName != 'property':
            continue
        properties[child.getAttribute('name')] = child.childNodes[0].data
    return properties
def get_property(node, property_name):
    """Return the text value of *property_name* on *node*, or None."""
    assert node.tagName == 'object'
    return get_properties(node).get(property_name)
def get_property_node(node, property_name):
    """Return the <property> element named *property_name*, or None.

    Unlike get_property this returns the node itself, so attributes such
    as translatable/context survive when the caller reuses the node.
    """
    assert node.tagName == 'object'
    # CLEANUP: a dead `properties = {}` accumulator was removed -- the
    # function only ever returned the first matching node.
    for child in node.childNodes:
        if child.nodeType != Node.ELEMENT_NODE:
            continue
        if child.tagName != 'property':
            continue
        if child.getAttribute('name') == property_name:
            return child
    return None
def get_signal_nodes(node):
    """Return the <signal> element children of an <object> node."""
    assert node.tagName == 'object'
    return [child for child in node.childNodes
            if child.nodeType == Node.ELEMENT_NODE
            and child.tagName == 'signal']
def get_property_nodes(node):
    """Return the <property> element children of an <object> node."""
    assert node.tagName == 'object'
    # FIXME: handle comments
    return [child for child in node.childNodes
            if child.nodeType == Node.ELEMENT_NODE
            and child.tagName == 'property']
def get_accelerator_nodes(node):
    """Return the <accelerator> element children of an <object> node."""
    assert node.tagName == 'object'
    return [child for child in node.childNodes
            if child.nodeType == Node.ELEMENT_NODE
            and child.tagName == 'accelerator']
def get_object_node(child_node):
    """Return the single <object> element inside a <child> element."""
    assert child_node.tagName == 'child', child_node
    matches = [n for n in child_node.childNodes
               if n.nodeType == Node.ELEMENT_NODE and n.tagName == 'object']
    assert len(matches) == 1, matches
    return matches[0]
def copy_properties(node, props, prop_dict):
    """Copy the named property nodes of *node* into *prop_dict*.

    Missing properties are silently skipped.  Returns *node* unchanged.
    """
    assert node.tagName == 'object'
    for prop_name in props:
        found = get_property_node(node, prop_name)
        if found is not None:
            prop_dict[prop_name] = found
    return node
class GtkBuilderConverter(object):
    """Transforms a libglade XML document into GtkBuilder format.

    The conversion happens in place on a minidom tree: <widget> tags are
    renamed to <object>, menus become GtkUIManager/GtkAction structures,
    and libglade-only constructs (inline adjustments, combobox item
    lists, accessibility tags, ...) are rewritten into the equivalent
    GtkBuilder toplevel objects.
    """

    def __init__(self, skip_windows, root):
        # skip_windows: drop GtkWindow subclasses, keeping their child.
        # root: only convert the widget with this id (and its children).
        self.skip_windows = skip_windows
        self.root = root
        self.root_objects = []
        self.objects = {}

    #
    # Public API
    #

    def parse_file(self, file):
        """Parse and convert the glade file at path/file object *file*."""
        self._dom = minidom.parse(file)
        self._parse()

    def parse_buffer(self, buffer):
        """Parse and convert glade XML held in the string *buffer*."""
        self._dom = minidom.parseString(buffer)
        self._parse()

    def to_xml(self):
        """Serialize the converted document as UTF-8 encoded XML."""
        xml = self._dom.toprettyxml("", "")
        return xml.encode('utf-8')

    #
    # Private
    #

    def _get_object(self, name):
        # Look up a previously registered <object> node by its id.
        return self.objects.get(name)

    def _get_objects_by_attr(self, attribute, value):
        # All <object> nodes whose *attribute* equals *value*.
        return [w for w in self._dom.getElementsByTagName("object")
                if w.getAttribute(attribute) == value]

    def _create_object(self, obj_class, obj_id, template=None, properties=None):
        """
        Creates a new <object> tag.
        Optionally a name template can be provided which will be used
        to avoid naming collisions.
        The properties dictionary can either contain string values or Node
        values. If a node is provided the name of the node will be overridden
        by the dictionary key.
        @param obj_class: class of the object (class tag)
        @param obj_id: identifier of the object (id tag)
        @param template: name template to use, for example 'button'
        @param properties: dictionary of properties
        @type properties: string or Node.
        @returns: Newly created node of the object
        """
        if template is not None:
            # Find the first unused "<template><n>" id.
            count = 1
            while True:
                obj_id = template + str(count)
                widget = self._get_object(obj_id)
                if widget is None:
                    break
                count += 1
        obj = self._dom.createElement('object')
        obj.setAttribute('class', obj_class)
        obj.setAttribute('id', obj_id)
        if properties:
            for name, value in properties.items():
                if isinstance(value, Node):
                    # Reuse the node, so translatable and context still will be
                    # set when converting nodes. See also #509153
                    prop = value
                else:
                    prop = self._dom.createElement('property')
                    prop.appendChild(self._dom.createTextNode(value))
                prop.setAttribute('name', str(name))
                obj.appendChild(prop)
        self.objects[obj_id] = obj
        return obj

    def _create_root_object(self, obj_class, template, properties=None):
        # Like _create_object, but the object is also emitted as a toplevel
        # child of the <interface> (inserted during _parse).
        obj = self._create_object(obj_class, None, template, properties)
        self.root_objects.append(obj)
        return obj

    def _parse(self):
        """Drive the whole conversion over the parsed DOM."""
        glade_iface = self._dom.getElementsByTagName("glade-interface")
        assert glade_iface, ("Badly formed XML, there is "
                             "no <glade-interface> tag.")

        # Rename glade-interface to interface
        glade_iface[0].tagName = 'interface'
        self._interface = glade_iface[0]

        # Remove glade-interface doc type
        for node in self._dom.childNodes:
            if node.nodeType == Node.DOCUMENT_TYPE_NODE:
                if node.name == 'glade-interface':
                    self._dom.removeChild(node)

        # Strip unsupported tags
        for tag in ['requires']:
            for child in self._dom.getElementsByTagName(tag):
                child.parentNode.removeChild(child)

        if self.root:
            self._strip_root(self.root)

        # Rename widget to object
        objects = self._dom.getElementsByTagName("widget")
        for node in objects:
            node.tagName = "object"

        for node in objects:
            self._convert(node.getAttribute("class"), node)
            self.objects[node.getAttribute('id')] = node

        # Convert Gazpachos UI tag
        for node in self._dom.getElementsByTagName("ui"):
            self._convert_ui(node)

        # Convert accessibility tag
        for node in self._dom.getElementsByTagName("accessibility"):
            self._convert_accessibility(node)

        # Output the newly created root objects and sort them
        # by attribute id
        for obj in sorted(self.root_objects,
                          key=lambda n: n.getAttribute('id'),
                          reverse=True):
            self._interface.childNodes.insert(0, obj)

    def _convert(self, klass, node):
        # Class-specific conversions first, then generic property fixups.
        if klass == 'GtkNotebook':
            self._packing_prop_to_child_attr(node, "type", "tab")
        elif klass in ['GtkExpander', 'GtkFrame']:
            self._packing_prop_to_child_attr(
                node, "type", "label_item", "label")
        elif klass == "GtkMenuBar":
            self._convert_menu(node)
        elif klass == "GtkMenu":
            # Only convert toplevel popups
            if node.parentNode == self._interface:
                self._convert_menu(node, popup=True)
        elif klass in WINDOWS and self.skip_windows:
            self._remove_window(node)

        self._default_widget_converter(node)

    def _default_widget_converter(self, node):
        """Property conversions that apply regardless of widget class."""
        klass = node.getAttribute("class")
        for prop in get_property_nodes(node):
            prop_name = prop.getAttribute("name")
            if prop_name == "sizegroup":
                self._convert_sizegroup(node, prop)
            elif prop_name == "tooltip" and klass != "GtkAction":
                prop.setAttribute("name", "tooltip-text")
            elif prop_name in ["response_id", 'response-id']:
                # It does not make sense to convert responses when
                # we're not going to output dialogs
                if self.skip_windows:
                    continue
                object_id = node.getAttribute('id')
                response = prop.childNodes[0].data
                self._convert_dialog_response(node, object_id, response)
                prop.parentNode.removeChild(prop)
            elif prop_name == "adjustment":
                self._convert_adjustment(prop)
            elif prop_name == "items" and klass in ['GtkComboBox',
                                                    'GtkComboBoxEntry']:
                self._convert_combobox_items(node, prop)
            elif prop_name == "text" and klass == 'GtkTextView':
                self._convert_textview_text(prop)

    def _remove_window(self, node):
        # Used by --skip-windows: replace the window node by its single
        # child widget.
        object_node = get_object_node(get_child_nodes(node)[0])
        parent = node.parentNode
        parent.removeChild(node)
        parent.appendChild(object_node)

    def _convert_menu(self, node, popup=False):
        """Turn a GtkMenuBar/toplevel GtkMenu into GtkUIManager markup."""
        if node.hasAttribute('constructor'):
            # Already constructed by a uimanager; nothing to do.
            return
        uimgr = self._create_root_object('GtkUIManager',
                                         template='uimanager')
        if popup:
            name = 'popup'
        else:
            name = 'menubar'
        menu = self._dom.createElement(name)
        menu.setAttribute('name', node.getAttribute('id'))
        node.setAttribute('constructor', uimgr.getAttribute('id'))

        for child in get_child_nodes(node):
            obj_node = get_object_node(child)
            item = self._convert_menuitem(uimgr, obj_node)
            menu.appendChild(item)
            child.removeChild(obj_node)
            child.parentNode.removeChild(child)

        ui = self._dom.createElement('ui')
        uimgr.appendChild(ui)
        ui.appendChild(menu)

    def _convert_menuitem(self, uimgr, obj_node):
        """Recursively convert a menu item into a <menu>/<menuitem> tag."""
        children = get_child_nodes(obj_node)
        name = 'menuitem'
        if children:
            child_node = children[0]
            menu_node = get_object_node(child_node)
            # Can be GtkImage, which will take care of later.
            if menu_node.getAttribute('class') == 'GtkMenu':
                name = 'menu'
        object_class = obj_node.getAttribute('class')
        if object_class in ['GtkMenuItem',
                            'GtkImageMenuItem',
                            'GtkCheckMenuItem',
                            'GtkRadioMenuItem']:
            menu = self._dom.createElement(name)
        elif object_class == 'GtkSeparatorMenuItem':
            return self._dom.createElement('separator')
        else:
            raise NotImplementedError(object_class)
        menu.setAttribute('action', obj_node.getAttribute('id'))
        self._add_action_from_menuitem(uimgr, obj_node)
        if children:
            # Recurse into the submenu's items.
            for child in get_child_nodes(menu_node):
                obj_node = get_object_node(child)
                item = self._convert_menuitem(uimgr, obj_node)
                menu.appendChild(item)
                child.removeChild(obj_node)
                child.parentNode.removeChild(child)
        return menu

    def _menuitem_to_action(self, node, properties):
        # Properties shared by every action type.
        copy_properties(node, ['label', 'tooltip'], properties)

    def _togglemenuitem_to_action(self, node, properties):
        # Toggle actions additionally carry the 'active' state.
        self._menuitem_to_action(node, properties)
        copy_properties(node, ['active'], properties)

    def _radiomenuitem_to_action(self, node, properties):
        # Radio actions additionally carry the 'group' reference.
        self._togglemenuitem_to_action(node, properties)
        copy_properties(node, ['group'], properties)

    def _add_action_from_menuitem(self, uimgr, node):
        """Create the GtkAction for a menu item and attach it to the
        uimanager's (single) action group, creating the group on demand."""
        properties = {}
        object_class = node.getAttribute('class')
        object_id = node.getAttribute('id')
        if object_class == 'GtkMenuItem':
            name = 'GtkAction'
            self._menuitem_to_action(node, properties)
        elif object_class == 'GtkCheckMenuItem':
            name = 'GtkToggleAction'
            self._togglemenuitem_to_action(node, properties)
        elif object_class == 'GtkRadioMenuItem':
            name = 'GtkRadioAction'
            self._radiomenuitem_to_action(node, properties)
        elif object_class == 'GtkImageMenuItem':
            name = 'GtkAction'
            children = get_child_nodes(node)
            if (children and
                children[0].getAttribute('internal-child') == 'image'):
                # Lift the stock id off the internal GtkImage child.
                image = get_object_node(children[0])
                child = get_property_node(image, 'stock')
                if child is not None:
                    properties['stock_id'] = child
            self._menuitem_to_action(node, properties)
        elif object_class == 'GtkSeparatorMenuItem':
            # Separators carry no action.
            return
        else:
            raise NotImplementedError(object_class)

        if get_property(node, 'use_stock') == 'True':
            # With use_stock the label *is* the stock id.
            if 'label' in properties:
                properties['stock_id'] = properties['label']
                del properties['label']

        properties['name'] = object_id
        action = self._create_object(name,
                                     object_id,
                                     properties=properties)
        for signal in get_signal_nodes(node):
            signal_name = signal.getAttribute('name')
            if signal_name in ['activate', 'toggled']:
                action.appendChild(signal)
            else:
                print 'Unhandled signal %s::%s' % (node.getAttribute('class'),
                                                   signal_name)

        if not uimgr.childNodes:
            # First action: create the shared GtkActionGroup.
            child = self._dom.createElement('child')
            uimgr.appendChild(child)

            group = self._create_object('GtkActionGroup', None,
                                        template='actiongroup')
            child.appendChild(group)
        else:
            group = uimgr.childNodes[0].childNodes[0]

        child = self._dom.createElement('child')
        group.appendChild(child)
        child.appendChild(action)

        for accelerator in get_accelerator_nodes(node):
            signal_name = accelerator.getAttribute('signal')
            if signal_name != 'activate':
                print 'Unhandled accelerator signal for %s::%s' % (
                    node.getAttribute('class'), signal_name)
                continue
            accelerator.removeAttribute('signal')
            child.appendChild(accelerator)

    def _convert_sizegroup(self, node, prop):
        """Move an inline sizegroup property into a toplevel GtkSizeGroup's
        <widgets> membership list."""
        # This is Gazpacho only
        node.removeChild(prop)
        obj = self._get_object(prop.childNodes[0].data)
        if obj is None:
            # Fall back to the last GtkSizeGroup seen, or create one.
            widgets = self._get_objects_by_attr("class", "GtkSizeGroup")
            if widgets:
                obj = widgets[-1]
            else:
                obj = self._create_root_object('GtkSizeGroup',
                                               template='sizegroup')

        widgets = obj.getElementsByTagName("widgets")
        if widgets:
            assert len(widgets) == 1
            widgets = widgets[0]
        else:
            widgets = self._dom.createElement("widgets")
            obj.appendChild(widgets)
        member = self._dom.createElement("widget")
        member.setAttribute("name", node.getAttribute("id"))
        widgets.appendChild(member)

    def _convert_dialog_response(self, node, object_name, response):
        """Register *response* for a dialog button in <action-widgets>."""
        # 1) Get parent dialog node
        while True:
            # If we can't find the parent dialog, give up
            if node == self._dom:
                return

            if (node.tagName == 'object' and
                node.getAttribute('class') == 'GtkDialog'):
                dialog = node
                break
            node = node.parentNode
            assert node

        # 2) Get dialogs action-widgets tag, create if not found
        for child in dialog.childNodes:
            if child.nodeType != Node.ELEMENT_NODE:
                continue
            if child.tagName == 'action-widgets':
                actions = child
                break
        else:
            actions = self._dom.createElement("action-widgets")
            dialog.appendChild(actions)

        # 3) Add action-widget tag for the response
        action = self._dom.createElement("action-widget")
        action.setAttribute("response", response)
        action.appendChild(self._dom.createTextNode(object_name))
        actions.appendChild(action)

    def _convert_adjustment(self, prop):
        """Replace an inline "value lower upper step page page_size"
        adjustment property by a reference to a new GtkAdjustment object."""
        properties = {}
        if prop.childNodes:
            data = prop.childNodes[0].data
            value, lower, upper, step, page, page_size = data.split(' ')
            properties.update(value=value,
                              lower=lower,
                              upper=upper,
                              step_increment=step,
                              page_increment=page,
                              page_size=page_size)
        else:
            # Ensure a text node exists so the id can be written below.
            prop.appendChild(self._dom.createTextNode(""))

        adj = self._create_root_object("GtkAdjustment",
                                       template='adjustment',
                                       properties=properties)
        prop.childNodes[0].data = adj.getAttribute('id')

    def _convert_combobox_items(self, node, prop):
        """Replace a newline-separated "items" property by a GtkListStore
        model plus a GtkCellRendererText child on the combo."""
        parent = prop.parentNode
        if not prop.childNodes:
            parent.removeChild(prop)
            return

        value = prop.childNodes[0].data
        model = self._create_root_object("GtkListStore",
                                         template="model")

        columns = self._dom.createElement('columns')
        model.appendChild(columns)

        column = self._dom.createElement('column')
        column.setAttribute('type', 'gchararray')
        columns.appendChild(column)

        data = self._dom.createElement('data')
        model.appendChild(data)

        # One single-column row per item line.
        for item in value.split('\n'):
            row = self._dom.createElement('row')
            data.appendChild(row)

            col = self._dom.createElement('col')
            col.setAttribute('id', '0')
            col.appendChild(self._dom.createTextNode(item))
            row.appendChild(col)

        model_prop = self._dom.createElement('property')
        model_prop.setAttribute('name', 'model')
        model_prop.appendChild(
            self._dom.createTextNode(model.getAttribute('id')))
        parent.appendChild(model_prop)
        parent.removeChild(prop)

        child = self._dom.createElement('child')
        node.appendChild(child)

        cell_renderer = self._create_object('GtkCellRendererText', None,
                                            template='renderer')
        child.appendChild(cell_renderer)

        attributes = self._dom.createElement('attributes')
        child.appendChild(attributes)

        attribute = self._dom.createElement('attribute')
        attributes.appendChild(attribute)
        attribute.setAttribute('name', 'text')
        attribute.appendChild(self._dom.createTextNode('0'))

    def _convert_textview_text(self, prop):
        """Move a GtkTextView "text" property into a new GtkTextBuffer."""
        if not prop.childNodes:
            prop.parentNode.removeChild(prop)
            return

        data = prop.childNodes[0].data
        if prop.hasAttribute('translatable'):
            prop.removeAttribute('translatable')
        tbuffer = self._create_root_object("GtkTextBuffer",
                                           template='textbuffer',
                                           properties=dict(text=data))
        prop.childNodes[0].data = tbuffer.getAttribute('id')

    def _packing_prop_to_child_attr(self, node, prop_name, prop_val,
                                    attr_val=None):
        """Turn a packing <property> equal to *prop_val* into an attribute
        on the <child> tag (used for notebook tabs and frame labels)."""
        for child in get_child_nodes(node):
            packing_props = [p for p in child.childNodes if p.nodeName == "packing"]
            if not packing_props:
                continue
            assert len(packing_props) == 1
            packing_prop = packing_props[0]
            properties = packing_prop.getElementsByTagName("property")
            for prop in properties:
                if (prop.getAttribute("name") != prop_name or
                    prop.childNodes[0].data != prop_val):
                    continue
                packing_prop.removeChild(prop)
                child.setAttribute(prop_name, attr_val or prop_val)
            if len(properties) == 1:
                # The packing tag is now empty; drop it entirely.
                child.removeChild(packing_prop)

    def _convert_ui(self, node):
        """Inline a Gazpacho <ui> CDATA section as real XML children."""
        cdata = node.childNodes[0]
        data = cdata.toxml().strip()
        if not data.startswith("<![CDATA[") or not data.endswith("]]>"):
            return
        # Strip the CDATA wrapper and re-parse the payload.
        data = data[9:-3]
        child = minidom.parseString(data).childNodes[0]
        nodes = child.childNodes[:]
        for child_node in nodes:
            node.appendChild(child_node)
        node.removeChild(cdata)
        if not node.hasAttribute("id"):
            return

        # Updating references made by widgets
        parent_id = node.parentNode.getAttribute("id")
        for widget in self._get_objects_by_attr("constructor",
                                                node.getAttribute("id")):
            widget.getAttributeNode("constructor").value = parent_id
        node.removeAttribute("id")

    def _convert_accessibility(self, node):
        """Convert an <accessibility> block into atk property/relation/
        action tags and an internal "accessible" AtkObject child."""
        objectNode = node.parentNode
        parent_id = objectNode.getAttribute("id")

        properties = {}
        # NOTE(review): the loop variable shadows the argument; the original
        # relied on this, so it is preserved as-is.
        for node in node.childNodes:
            if node.nodeName == 'atkproperty':
                node.tagName = 'property'
                properties[node.getAttribute('name')] = node
                node.parentNode.removeChild(node)
            elif node.nodeName == 'atkrelation':
                node.tagName = 'relation'
                relation_type = node.getAttribute('type')
                relation_type = relation_type.replace('_', '-')
                node.setAttribute('type', relation_type)
            elif node.nodeName == 'atkaction':
                node.tagName = 'action'

        if properties:
            child = self._dom.createElement('child')
            child.setAttribute("internal-child", "accessible")
            atkobject = self._create_object(
                "AtkObject", None,
                template='a11y-%s' % (parent_id,),
                properties=properties)
            child.appendChild(atkobject)
            objectNode.appendChild(child)

    def _strip_root(self, root_name):
        """Drop every toplevel except the widget named *root_name*."""
        for widget in self._dom.getElementsByTagName("widget"):
            if widget.getAttribute('id') == root_name:
                break
        else:
            raise SystemExit("Could not find an object called `%s'" % (
                root_name))

        for child in self._interface.childNodes[:]:
            if child.nodeType != Node.ELEMENT_NODE:
                continue
            child.parentNode.removeChild(child)
        self._interface.appendChild(widget)
def _indent(output):
if not subprocess:
return output
for directory in os.environ['PATH'].split(os.pathsep):
filename = os.path.join(directory, 'xmllint')
if os.path.exists(filename):
break
else:
return output
s = subprocess.Popen([filename, '--format', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
s.stdin.write(output)
s.stdin.close()
return s.stdout.read()
def usage():
    """Print the module docstring (the command-line help text)."""
    # Parenthesized form prints identically under Python 2 (a single
    # parenthesized expression) and is also valid Python 3 syntax.
    print(__doc__)
def main(args):
    """Command-line entry point.

    Parses options, converts the glade file named by the first
    positional argument and writes GtkBuilder XML to the second
    (or to stdout when it is '-').  Returns a shell exit status.
    """
    try:
        opts, args = getopt.getopt(args[1:], "hwr:",
                                   ["help", "skip-windows", "root="])
    except getopt.GetoptError:
        usage()
        return 2
    if len(args) != 2:
        usage()
        return 2
    input_filename, output_filename = args
    # NOTE: the old unused "split = False" local has been removed.
    skip_windows = False
    root = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-r", "--root"):
            root = a
        elif o in ("-w", "--skip-windows"):
            skip_windows = True
    conv = GtkBuilderConverter(skip_windows=skip_windows,
                               root=root)
    conv.parse_file(input_filename)
    xml = _indent(conv.to_xml())
    if output_filename == "-":
        print(xml)
    else:
        # Close the output file promptly instead of relying on garbage
        # collection of the previous open(...).write(...) one-liner.
        with open(output_filename, 'w') as out:
            out.write(xml)
        # "Wrote " + name produces the same text as the old
        # two-argument print statement (space-separated).
        print("Wrote " + output_filename)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycylon import Table
from pycylon import CylonContext
# Demonstrates element-wise boolean operators (|, &, ~) on pycylon Tables.
ctx: CylonContext = CylonContext(config=None, distributed=False)

data1 = [[False, True, False, True], [True, True, False, False]]
columns1 = ['col-1', 'col-2']
data2 = [[True, True, False, False], [False, True, False, True]]
columns2 = ['col-1', 'col-2']

tb1: Table = Table.from_list(ctx, columns1, data1)
tb2: Table = Table.from_list(ctx, columns2, data2)


def show(label, table):
    # Print a caption line followed by the table contents.
    print(label)
    print(table)


show("Table 1", tb1)
show("Table 2", tb2)

tb_or = tb1 | tb2
show("Or", tb_or)

tb_and = tb1 & tb2
show("And", tb_and)

tb_inv = ~tb1
show("Table", tb1)
show("Invert Table", tb_inv)
|
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Puppet metrics"""
from __future__ import print_function
import time
import yaml
from infra_libs import ts_mon
from chromite.lib import cros_logging as logging
# ts-mon metric objects exported by this module.  Each is populated by
# get_puppet_summary() from Puppet's last-run summary YAML.

# Gauge: configuration version (by default the parse timestamp).
config_version = ts_mon.GaugeMetric(
    'puppet/version/config',
    description='The version of the puppet configuration.'
    ' By default this is the time that the configuration was parsed')
# String: installed puppet client version.
puppet_version = ts_mon.StringMetric(
    'puppet/version/puppet',
    description='Version of puppet client installed.')
# Gauge, keyed by {'result': ...}: change counts from the last run.
events = ts_mon.GaugeMetric(
    'puppet/events',
    description='Number of changes the puppet client made to the system in its'
    ' last run, by success or failure')
# Gauge, keyed by {'action': ...}: resource counts from the last run.
resources = ts_mon.GaugeMetric(
    'puppet/resources',
    description='Number of resources known by the puppet client in its last'
    ' run')
# Float, keyed by {'step': ...}: per-step durations of the last run.
times = ts_mon.FloatMetric(
    'puppet/times',
    description='Time taken to perform various parts of the last puppet run',
    units=ts_mon.MetricsDataUnits.SECONDS)
# Float: seconds elapsed since the last puppet run.
age = ts_mon.FloatMetric('puppet/age',
                         description='Time since last run',
                         units=ts_mon.MetricsDataUnits.SECONDS)
# Summary YAML written by the puppet agent after each run.
_LAST_RUN_FILE = '/var/lib/puppet_last_run_summary.yaml'
def get_puppet_summary(time_fn=time.time):
    """Read Puppet's last-run summary YAML and export it to ts-mon metrics.

    Args:
      time_fn: Callable returning the current epoch time; injectable so
        tests can pin the 'age' computation.

    Returns silently when the summary file is missing (host not managed
    by Puppet) or malformed.
    """
    path = _LAST_RUN_FILE
    try:
        with open(path) as fh:
            data = yaml.safe_load(fh)
    except IOError:
        # This is fine - the system probably isn't managed by puppet.
        return
    except yaml.YAMLError:
        # This is less fine - the file exists but is invalid.
        logging.exception('Failed to read puppet lastrunfile %s', path)
        return
    if not isinstance(data, dict):
        return
    try:
        config_version.set(data['version']['config'])
    except ts_mon.MonitoringInvalidValueTypeError:
        # https://crbug.com/581749
        logging.exception('lastrunfile contains invalid "config" value. '
                          'Please fix Puppet.')
    except KeyError:
        logging.warning('version/config not found in %s', path)
    try:
        puppet_version.set(data['version']['puppet'])
    except ts_mon.MonitoringInvalidValueTypeError:
        # https://crbug.com/581749
        logging.exception('lastrunfile contains invalid puppet version. '
                          'Please fix Puppet.')
    except KeyError:
        logging.warning('version/puppet not found in %s', path)
    # items() replaces the Python 2-only iteritems(): identical iteration
    # behavior here, and keeps the module importable under Python 3
    # (the file already opts into print_function).
    try:
        for key, value in data['events'].items():
            if key != 'total':
                events.set(value, {'result': key})
    except KeyError:
        logging.warning('events not found in %s', path)
    try:
        for key, value in data['resources'].items():
            resources.set(value, {'action': key})
    except KeyError:
        logging.warning('resources not found in %s', path)
    try:
        for key, value in data['time'].items():
            if key == 'last_run':
                age.set(time_fn() - value)
            elif key != 'total':
                times.set(value, {'step': key})
    except KeyError:
        logging.warning('time not found in %s', path)
|
cities=['北京北', '北京东', '北京', '北京南', '北京西', '广州南', '重庆北', '重庆', '重庆南', '广州东', '上海', '上海南', '上海虹桥', '上海西', '
天津北', '天津', '天津南', '天津西', '长春', '长春南', '长春西', '成都东', '成都南', '成都', '长沙', '长沙南', '福州', '福州南', '贵阳', '广州', '广
州西', '哈尔滨', '哈尔滨东', '哈尔滨西', '合肥', '合肥西', '呼和浩特东', '呼和浩特', '口东', '海口东', '海口', '杭州东', '杭州', '杭州南', '济南', '
济南东', '济南西', '昆明', '昆明西', '拉萨', '兰州东', '兰州', '兰州西', '南昌', '南京', '南京南', '南宁', '石家庄北', '石家庄', '沈阳', '沈阳北', '
沈阳东', '沈阳南', '太原北', '太原东', '太原', '武汉', '王家营西', '乌鲁木齐南', '西安北', '西安', '西安南', '西宁', '银川', '郑州', '阿尔山', '安康', '阿克苏', '阿里河', '阿拉山口', '安平', '安庆', '安顺', '鞍山', '安阳', '北安', '蚌埠', '白城', '北海', '白河', '白涧', '宝鸡', '滨江', '博克图', '百色', '白山市', '北台', '包头东', '包头', '北屯市', '本溪', '白云鄂博', '白银西', '亳州', '赤壁', '常德', '承德', '长甸', '赤峰', '茶陵', '苍南', '
昌平', '崇仁', '昌图', '长汀镇', '曹县', '楚雄南', '陈相屯', '长治北', '池州', '长征', '常州', '郴州', '长治', '沧州', '崇左', '大安北', '大成', '丹
东', '东方红', '东莞东', '大虎山', '敦煌', '敦化', '德惠', '东京城', '大涧', '都江堰', '大连北', '大理', '大连', '定南', '大庆', '东胜', '大石桥', '
大同', '东营', '大杨树', '都匀', '邓州', '达州', '德州', '额济纳', '二连', '恩施', '福鼎', '凤凰机场', '风陵渡', '涪陵', '富拉尔基', '抚顺北', '佛山', '阜新南', '阜阳', '格尔木', '广汉', '古交', '桂林北', '古莲', '桂林', '固始', '广水', '干塘', '广元', '广州北', '赣州', '公主岭', '公主岭南', '淮安', '淮北', '鹤北', '淮滨', '河边', '潢川', '韩城', '邯郸', '横道河子', '鹤岗', '皇姑屯', '红果', '黑河', '怀化', '汉口', '葫芦岛', '海拉尔', '霍林郭
勒', '海伦', '侯马', '哈密', '淮南', '桦南', '海宁西', '鹤庆', '怀柔北', '怀柔', '黄石东', '华山', '黄山', '黄石', '衡水', '衡阳', '菏泽', '贺州', '
汉中', '惠州', '吉安', '集安', '江边村', '晋城', '金城江', '景德镇', '嘉峰', '加格达奇', '井冈山', '蛟河', '金华南', '金华', '九江', '吉林', '荆门',
'佳木斯', '济宁', '集宁南', '酒泉', '江山', '吉首', '九台', '镜铁山', '鸡西', '绩溪县', '嘉峪关', '江油', '锦州', '金州', '蓟州', '库尔勒', '开封', '岢岚', '凯里', '喀什', '昆山南', '奎屯', '开原', '六安', '灵宝', '芦潮港', '隆昌', '陆川', '利川', '临川', '潞城', '鹿道', '娄底', '临汾', '良各庄',
'临河', '漯河', '绿化', '隆化', '丽江', '临江', '龙井', '吕梁', '醴陵', '柳林南', '滦平', '六盘水', '灵丘', '旅顺', '兰溪', '陇西', '澧县', '临西', '龙岩', '耒阳', '洛阳', '连云港东', '洛阳东', '临沂', '洛阳龙门', '柳园', '凌源', '辽源', '立志', '柳州', '辽中', '麻城', '免渡河', '牡丹江', '莫尔道
嘎', '明光', '满归', '漠河', '茂名', '茂名西', '密山', '马三家', '麻尾', '绵阳', '梅州', '满洲里', '宁波东', '宁波', '南岔', '南充', '南丹', '南大庙', '南芬', '讷河', '嫩江', '内江', '南平', '南通', '南阳', '碾子山', '平顶山', '盘锦', '平凉', '平凉南', '平泉', '坪石', '萍乡', '凭祥', '郫县西', '攀枝花', '蕲春', '青城山', '青岛', '清河城', '曲靖', '黔江', '前进镇', '齐齐哈尔', '七台河', '沁县', '泉州东', '泉州', '衢州', '融安', '汝箕沟', '瑞金', '日照', '双城堡', '绥芬河', '韶关东', '山海关', '绥化', '三间房', '苏家屯', '舒兰', '三明', '神木', '三门峡', '商南', '遂宁', '四平', '商丘', '上饶', '韶山', '宿松', '汕头', '邵武', '涉县', '三亚', '亚', '邵阳', '十堰', '双鸭山', '松原', '苏州', '深圳', '宿州', '随州', '朔州', '深圳西', '塘豹',
'塔尔气', '潼关', '塘沽', '塔河', '通化', '泰来', '吐鲁番', '通辽', '铁岭', '陶赖昭', '图们', '铜仁', '唐山北', '田师府', '泰山', '唐山', '天水', '通远堡', '太阳升', '泰州', '桐梓', '通州西', '五常', '武昌', '瓦房店', '威海', '芜湖', '乌海西', '吴家屯', '武隆', '乌兰浩特', '渭南', '威舍', '歪头山', '武威', '武威南', '无锡', '乌西', '乌伊岭', '武夷山', '万源', '万州', '梧州', '温州', '温州南', '西昌', '许昌', '西昌南', '香坊', '轩岗', '兴国', '宣汉', '新会', '新晃', '锡林浩特', '兴隆县', '厦门北', '厦门', '厦门高崎', '小市', '秀山', '向塘', '宣威', '新乡', '信阳', '咸阳', '襄阳', '熊岳城',
'新沂', '兴义', '新余', '徐州', '延安', '宜宾', '亚布力南', '叶柏寿', '宜昌东', '永川', '盐城', '宜昌', '运城', '伊春', '榆次', '杨村', '宜春西', '伊尔施', '燕岗', '永济', '延吉', '营口', '牙克石', '阎良', '玉林', '榆林', '亚龙湾', '一面坡', '伊宁', '阳平关', '玉屏', '原平', '延庆', '阳泉曲', '玉
泉', '阳泉', '营山', '玉山', '燕山', '榆树', '鹰潭', '烟台', '伊图里河', '玉田县', '义乌', '阳新', '义县', '益阳', '岳阳', '崖州', '永州', '扬州', '
淄博', '镇城底', '自贡', '珠海', '珠海北', '湛江', '镇江', '张家界', '张家口', '张家口南', '周口', '哲里木', '扎兰屯', '驻马店', '肇庆', '周水子', '
昭通', '中卫', '资阳', '遵义西', '枣庄', '资中', '株洲', '枣庄西', '昂昂溪', '阿城', '安达', '安德', '安定', '安多', '安广', '敖汉', '艾河', '安化',
'艾家村', '鳌江', '安家', '阿金', '安靖', '阿克陶', '安口窑', '敖力布告', '安龙', '阿龙山', '安陆', '阿木尔', '阿南庄', '安庆西', '鞍山西', '安塘', '安亭北', '阿图什', '安图', '安溪', '博鳌', '北碚', '白壁关', '蚌埠南', '巴楚', '板城', '北戴河', '保定', '宝坻', '八达岭', '巴东', '柏果', '布海', '
白河东', '贲红', '宝华山', '白河县', '白芨沟', '碧鸡关', '北滘', '碧江', '白鸡坡', '笔架山', '八角台', '保康', '白奎堡', '白狼', '百浪', '博乐', '宝
拉格', '巴林', '宝林', '北流', '勃利', '布列开', '宝龙山', '百里峡', '八面城', '班猫箐', '八面通', '北马圈子', '北票南', '白旗', '宝泉岭', '白泉', '
巴山', '白水江', '白沙坡', '白石山', '白水镇', '东', '坂田', '泊头', '北屯', '本溪湖', '博兴', '八仙筒', '白音察干', '背荫河', '北营', '巴彦高勒', '
白音他拉', '鲅鱼圈', '白银市', '白音胡硕', '巴中', '霸州', '北宅', '赤壁北', '查布嘎', '长城', '长冲', '承德东', '赤峰西', '嵯岗', '柴岗', '长葛', '
柴沟堡', '城固', '陈官营', '成高子', '草海', '柴河', '册亨', '草河口', '崔黄口', '巢湖', '蔡家沟', '成吉思汗', '岔江', '蔡家坡', '昌乐', '超梁沟', '
慈利', '昌黎', '长岭子', '晨明', '长农', '昌平北', '常平', '长坡岭', '辰清', '蔡山', '楚山', '长寿', '磁山', '苍石', '草市', '察素齐', '长山屯', '长
汀', '昌图西', '春湾', '磁县', '岑溪', '辰溪', '磁西', '长兴南', '磁窑', '春阳', '城阳', '创业村', '朝阳川', '朝阳地', '朝阳南', '长垣', '朝阳镇', '
滁州北', '常州北', '滁州', '潮州', '常庄', '曹子里', '车转湾', '郴州西', '沧州西', '德安', '大安', '大坝', '大板', '大巴', '到保', '定边', '东边井',
'德伯斯', '打柴沟', '德昌', '滴道', '大磴沟', '刀尔登', '得耳布尔', '东方', '丹凤', '东丰', '都格', '大官屯', '大关', '东光', '东海', '大灰厂', '大红旗', '大禾塘', '东海县', '德惠西', '达家沟', '东津', '杜家', '大口屯', '东来', '德令哈', '大陆号', '带岭', '大林', '达拉特旗', '独立屯', '豆罗', '达
拉特西', '大连西', '东明村', '洞庙河', '东明县', '大拟', '大平房', '大盘石', '大埔', '大堡', '大庆东', '大其拉哈', '道清', '对青山', '德清西', '大庆
西', '东升', '砀山', '独山', '登沙河', '读书铺', '大石头', '东胜西', '大石寨', '东台', '定陶', '灯塔', '大田边', '东通化', '丹徒', '大屯', '东湾', '
大武口', '低窝铺', '大王滩', '大湾子', '大兴沟', '大兴', '定西', '甸心', '东乡', '代县', '定襄', '东戌', '东辛庄', '丹阳', '德阳', '大雁', '当阳', '
丹阳北', '大英东', '东淤地', '大营', '定远', '岱岳', '大元', '大营镇', '大营子', '大战场', '德州东', '东至', '低庄', '东镇', '道州', '东庄', '兑镇',
'豆庄', '定州', '大竹园', '大杖子', '豆张庄', '峨边', '二道沟门', '二道湾', '鄂尔多斯', '二龙', '二龙山屯', '峨眉', '二密河', '二营', '鄂州', '福安', '丰城', '丰城南', '肥东', '发耳', '富海', '福海', '凤凰城', '汾河', '奉化', '富锦', '范家屯', '福利区', '福利屯', '丰乐镇', '阜南', '阜宁', '抚宁',
'福清', '福泉', '丰水村', '丰顺', '繁峙', '抚顺', '福山口', '扶绥', '冯屯', '浮图峪', '富县东', '凤县', '富县', '费县', '凤阳', '汾阳', '扶余北', '分宜', '富源', '扶余', '富裕', '抚州北', '凤州', '丰镇', '范镇', '固安', '广安', '高碑店', '沟帮子', '甘草店', '谷城', '藁城', '高村', '古城镇', '广德', '贵定', '贵定南', '古东', '贵港', '官高', '葛根庙', '干沟', '甘谷', '高各庄', '甘河', '根河', '郭家店', '孤家子', '古浪', '皋兰', '高楼房', '归流河', '关林', '甘洛', '郭磊庄', '高密', '公庙子', '工农湖', '广宁寺南', '广南卫', '高平', '甘泉北', '共青城', '甘旗卡', '甘泉', '高桥镇', '灌水', '赶水', '孤山口', '果松', '高山子', '嘎什甸子', '高台', '高滩', '古田', '官厅', '官厅西', '贵溪', '涡阳', '巩义', '高邑', '巩义南', '广元南', '固原', '菇园', '公营子', '光泽', '古镇', '固镇', '虢镇', '瓜州', '高州', '盖州', '官字井', '冠豸山', '盖州西', '淮安南', '红安', '海安县', '红安西', '黄柏', '海
北', '鹤壁', '会昌北', '华城', '河唇', '汉川', '海城', '合川', '黑冲滩', '黄村', '海城西', '化德', '洪洞', '霍尔果斯', '横峰', '韩府湾', '汉沽', '黄
瓜园', '红光镇', '浑河', '红花沟', '黄花筒', '贺家店', '和静', '红江', '黑井', '获嘉', '河津', '涵江', '华家', '杭锦后旗', '河间西', '花家庄', '河口
南', '黄口', '湖口', '呼兰', '葫芦岛北', '浩良河', '哈拉海', '鹤立', '桦林', '黄陵', '海林', '虎林', '寒岭', '和龙', '海龙', '哈拉苏', '呼鲁斯太', '
火连寨', '黄梅', '韩麻营', '黄泥河', '海宁', '惠农', '和平', '花棚子', '花桥', '宏庆', '怀仁', '华容', '华山北', '黄松甸', '和什托洛盖', '红山', '汉
寿', '衡山', '黑水', '惠山', '虎什哈', '红寺堡', '虎石台', '海石湾', '衡山西', '红砂岘', '黑台', '桓台', '和田', '会同', '海坨子', '黑旺', '海湾', '
红星', '徽县', '红兴隆', '换新天', '红岘台', '红彦', '合阳', '海阳', '衡阳东', '华蓥', '汉阴', '黄羊滩', '汉源', '河源', '花园', '湟源', '黄羊镇', '
湖州', '化州', '黄州', '霍州', '惠州西', '巨宝', '靖边', '金宝屯', '晋城北', '金昌', '鄄城', '交城', '建昌', '峻德', '井店', '鸡东', '江都', '鸡冠山', '金沟屯', '静海', '金河', '锦河', '精河', '精河南', '江华', '建湖', '纪家沟', '晋江', '锦界', '姜家', '江津', '金坑', '芨岭', '金马村', '江门东', '角美', '莒南', '井南', '建瓯', '经棚', '江桥', '九三', '金山北', '嘉善', '京山', '建始', '稷山', '吉舒', '建设', '甲山', '建三江', '嘉善南', '金山屯', '江所田', '景泰', '九台南', '吉文', '进贤', '莒县', '嘉祥', '介休', '嘉兴', '井陉', '嘉兴南', '夹心子', '姜堰', '揭阳', '建阳', '简阳', '巨野', '江永', '缙云', '靖远', '江源', '济源', '靖远西', '胶州北', '焦作东', '金寨', '靖州', '荆州', '胶州', '晋州', '锦州南', '焦作', '旧庄窝', '金杖子', '开
安', '库车', '康城', '库都尔', '宽甸', '克东', '昆都仑召', '开江', '康金井', '喀喇其', '开鲁', '克拉玛依', '口前', '昆山', '奎山', '克山', '开通', '
康熙岭', '昆阳', '克一河', '开原西', '康庄', '来宾', '老边', '灵宝西', '龙川', '乐昌', '黎城', '聊城', '蓝村', '两当', '林东', '乐都', '梁底下', '六
道河子', '鲁番', '廊坊', '落垡', '廊坊北', '老府', '兰岗', '龙骨甸', '芦沟', '龙沟', '拉古', '临海', '林海', '拉哈', '凌海', '柳河', '六合', '龙华',
'滦河沿', '六合镇', '亮甲店', '刘家店', '刘家河', '连江', '庐江', '李家', '罗江', '廉江', '两家', '龙江', '龙嘉', '莲江口', '蔺家楼', '李家坪', '兰考', '林口', '路口铺', '老莱', '拉林', '陆良', '龙里', '临澧', '兰棱', '零陵', '卢龙', '喇嘛甸', '里木店', '洛门', '龙南', '梁平', '罗平', '落坡岭', '
六盘山', '乐平市', '临清', '龙泉寺', '乐山北', '乐善村', '冷水江东', '连山关', '流水沟', '陵水', '丽水', '罗山', '鲁山', '梁山', '灵石', '露水河', '
庐山', '林盛堡', '柳树屯', '龙山镇', '梨树镇', '李石寨', '黎塘', '轮台', '芦台', '龙塘坝', '濑湍', '骆驼巷', '李旺', '莱芜东', '狼尾山', '灵武', '莱
芜西', '朗乡', '陇县', '临湘', '芦溪', '莱西', '林西', '滦县', '略阳', '莱阳', '辽阳', '临沂北', '凌源东', '连云港', '临颍', '老营', '龙游', '罗源',
'林源', '涟源', '涞源', '耒阳西', '临泽', '龙爪沟', '雷州', '六枝', '鹿寨', '来舟', '龙镇', '拉鲊', '兰州新区', '马鞍山', '毛坝', '毛坝关', '麻城北', '渑池', '明城', '庙城', '渑池南', '茅草坪', '猛洞河', '磨刀石', '弥渡', '帽儿山', '明港', '梅河口', '马皇', '孟家岗', '美兰', '汨罗东', '马莲河', '
茅岭', '庙岭', '茂林', '穆棱', '马林', '马龙', '木里图', '汨罗', '玛纳斯湖', '冕宁', '沐滂', '马桥河', '闽清', '民权', '明水河', '麻山', '眉山', '漫
水湾', '茂舍祖', '米沙子', '美溪', '勉县', '麻阳', '密云北', '米易', '麦园', '墨玉', '庙庄', '米脂', '明珠', '宁安', '农安', '南博山', '南仇', '南城
司', '宁村', '宁德', '南观村', '南宫东', '南关岭', '宁国', '宁海', '南华北', '南河川', '泥河子', '宁家', '南靖', '牛家', '能家', '南口', '南口前', '
南朗', '乃林', '尼勒克', '那罗', '宁陵县', '奈曼', '宁明', '南木', '南平南', '那铺', '南桥', '那曲', '暖泉', '南台', '南头', '宁武', '南湾子', '南翔
北', '宁乡', '内乡', '牛心台', '南峪', '娘子关', '南召', '南杂木', '蓬安', '平安', '平安驿', '磐安镇', '平安镇', '蒲城东', '蒲城', '裴德', '偏店', '
平顶山西', '坡底下', '瓢儿屯', '平房', '平岗', '平关', '盘关', '平果', '徘徊北', '平河口', '平湖', '盘锦北', '潘家店', '皮口南', '普兰店', '偏岭'
, '平山', '彭山', '皮山', '磐石', '平社', '彭水', '平台', '平田', '莆田', '葡萄菁', '普湾', '平旺', '平型关', '普雄', '郫县', '平洋', '彭阳', '平遥', '平邑', '平原堡', '平原', '平峪', '彭泽', '邳州', '平庄', '泡子', '平庄南', '乾安', '庆安', '迁安', '祁东北', '七甸', '曲阜东', '庆丰', '奇峰塔', '
曲阜', '琼海', '秦皇岛', '千河', '清河', '清河门', '清华园', '全椒', '渠旧', '潜江', '秦家', '綦江', '祁家堡', '清涧县', '秦家庄', '七里河', '秦岭',
'渠黎', '青龙', '青龙山', '祁门', '前磨头', '青山', '确山', '前山', '清水', '戚墅堰', '青田', '桥头', '青铜峡', '前卫', '前苇塘', '渠县', '祁县', '青县', '桥西', '清徐', '旗下营', '千阳', '沁阳', '泉阳', '祁阳北', '七营', '庆阳山', '清远', '清原', '钦州东', '钦州', '青州市', '瑞安', '荣昌', '瑞昌', '如皋', '容桂', '任丘', '乳山', '融水', '热水', '容县', '饶阳', '汝阳', '绕阳河', '汝州', '石坝', '上板城', '施秉', '上板城南', '世博园', '双城北', '舒城', '商城', '莎车', '顺昌', '神池', '沙城', '石城', '山城镇', '山丹', '顺德', '绥德', '水洞', '商都', '十渡', '四道湾', '顺德学院', '绅坊', '双
丰', '四方台', '水富', '三关口', '桑根达来', '韶关', '上高镇', '上杭', '沙海', '松河', '沙河', '沙河口', '赛汗塔拉', '沙河市', '沙后所', '山河屯', '
三河县', '四合永', '三汇镇', '双河镇', '石河子', '三合庄', '三家店', '水家湖', '沈家河', '松江河', '尚家', '孙家', '沈家', '双吉', '松江', '三江口',
'司家岭', '松江南', '石景山南', '邵家堂', '三江县', '三家寨', '十家子', '松江镇', '施家嘴', '深井子', '什里店', '疏勒', '疏勒河', '舍力虎', '石磷', '石林', '双辽', '绥棱', '石岭', '石林南', '石龙', '萨拉齐', '索伦', '商洛', '沙岭子', '石门县北', '三门峡南', '三门县', '石门县', '三门峡西', '肃宁',
'宋', '双牌', '四平东', '遂平', '沙坡头', '沙桥', '商丘南', '水泉', '石泉县', '石桥子', '石人城', '石人', '山市', '神树', '鄯善', '三水', '泗水', '石山', '松树', '首山', '三十家', '三十里堡', '松树镇', '松桃', '索图罕', '三堂集', '石头', '神头', '沙沱', '上万', '孙吴', '沙湾县', '歙县', '遂溪', '
沙县', '绍兴', '石岘', '上西铺', '石峡子', '沭阳', '绥阳', '寿阳', '水洋', '三阳川', '上腰墩', '三营', '顺义', '三义井', '三源浦', '上虞', '三原', '
上园', '水源', '桑园子', '绥中北', '苏州北', '宿州东', '深圳东', '深州', '孙镇', '绥中', '尚志', '师庄', '松滋', '师宗', '苏州园区', '苏州新区', '泰
安', '台安', '通安驿', '桐柏', '通北', '桐城', '汤池', '郯城', '铁厂', '桃村', '通道', '田东', '天岗', '土贵乌拉', '通沟', '太谷', '塔哈', '棠海', '
唐河', '泰和', '太湖', '团结', '谭家井', '陶家屯', '唐家湾', '统军庄', '泰康', '吐列毛杜', '图里河', '铜陵', '田林', '亭亮', '铁力', '铁岭西', '图们
北', '天门', '天门南', '太姥山', '土牧尔台', '土门子', '洮南', '潼南', '太平川', '太平镇', '图强', '台前', '天桥岭', '土桥子', '汤山城', '桃山', '塔
石嘴', '通途', '汤旺河', '同心', '土溪', '桐乡', '田阳', '天义', '汤阴', '驼腰岭', '太阳山', '汤原', '塔崖驿', '滕州东', '台州', '天祝', '滕州', '天
镇', '桐子林', '天柱山', '文安', '武安', '王安镇', '吴堡', '旺苍', '五叉沟', '文昌', '温春', '五大连池', '文登', '五道沟', '五道河', '文地', '卫东',
'武当山', '望都', '乌尔旗汗', '潍坊', '万发屯', '王府', '瓦房店西', '王岗', '武功', '湾沟', '吴官田', '乌海', '苇河', '卫辉', '吴家川', '五家', '威箐', '午汲', '渭津', '王家湾', '倭肯', '五棵树', '五龙背', '乌兰哈达', '万乐', '瓦拉干', '温岭', '五莲', '乌拉特前旗', '乌拉山', '卧里屯', '渭南北', '
乌奴耳', '万宁', '万年', '渭南南', '渭南镇', '沃皮', '吴桥', '汪清', '武清', '武山', '文水', '魏善庄', '王瞳', '五台山', '王团庄', '五五', '无锡东',
'卫星', '闻喜', '武乡', '无锡新区', '武穴', '吴圩', '王杨', '武义', '五营', '瓦窑田', '五原', '苇子沟', '韦庄', '五寨', '王兆屯', '微子镇', '魏杖子', '新安', '兴安', '新安县', '新保安', '下板城', '西八里', '宣城', '兴城', '小村', '新绰源', '下城子', '新城子', '喜德', '小得江', '西大庙', '小董', '
小东', '信丰', '襄汾', '息烽', '新干', '孝感', '西固城', '西固', '夏官营', '西岗子', '襄河', '新和', '宣和', '斜河涧', '新华屯', '新华', '新化', '宣
化', '兴和西', '小河沿', '下花园', '小河镇', '徐家', '峡江', '新绛', '辛集', '新江', '西街口', '许家屯', '许家台', '谢家镇', '兴凯', '小榄', '香兰',
'兴隆店', '新乐', '新林', '小岭', '新李', '西林', '西柳', '仙林', '新立屯', '兴隆镇', '新立镇', '新民', '西麻山', '下马塘', '孝南', '咸宁北', '兴宁', '咸宁', '犀浦东', '西平', '兴平', '新坪田', '霞浦', '溆浦', '犀浦', '新青', '新邱', '兴泉堡', '仙人桥', '小寺沟', '杏树', '浠水', '下社', '徐水', '
夏石', '小哨', '新松浦', '杏树屯', '许三湾', '湘潭', '邢台', '仙桃西', '下台子', '徐闻', '新窝铺', '修武', '新县', '息县', '西乡', '湘乡', '西峡', '
孝西', '小新街', '新兴县', '西小召', '小西庄', '向阳', '旬阳', '旬阳北', '襄阳东', '兴业', '小雨谷', '信宜', '小月旧', '小扬气', '祥云', '襄垣', '夏
邑县', '新友谊', '新阳镇', '徐州东', '新帐房', '悬钟', '新肇', '忻州', '汐子', '西哲里木', '新杖子', '姚安', '依安', '永安', '永安乡', '亚布力', '元
宝山', '羊草', '秧草地', '阳澄湖', '迎春', '叶城', '盐池', '砚川', '阳春', '宜城', '应城', '禹城', '晏城', '阳城', '阳岔', '郓城', '雁翅', '云彩岭',
'虞城县', '营城子', '英德', '永登', '尹地', '永定', '雁荡山', '于都', '园墩', '英德西', '永丰营', '杨岗', '阳高', '阳谷', '友好', '余杭', '沿河城', '岩会', '羊臼河', '永嘉', '营街', '盐津', '余江', '燕郊', '姚家', '岳家井', '一间堡', '英吉沙', '云居寺', '燕家庄', '永康', '营口东', '银浪', '永郎',
'宜良北', '永乐店', '伊拉哈', '伊林', '杨陵', '彝良', '杨林', '余粮堡', '杨柳青', '月亮田', '义马', '阳明堡', '玉门', '云梦', '元谋', '一面山', '沂南', '宜耐', '伊宁东', '营盘水', '羊堡', '阳泉北', '乐清', '焉耆', '源迁', '姚千户屯', '阳曲', '榆树沟', '月山', '玉石', '玉舍', '偃师', '沂水', '榆社', '颍上', '窑上', '元氏', '杨树岭', '野三坡', '榆树屯', '榆树台', '鹰手营子', '源潭', '牙屯堡', '烟筒山', '烟筒屯', '羊尾哨', '越西', '攸县', '永修', '玉溪西', '弋阳', '余姚', '酉阳', '岳阳东', '阳邑', '鸭园', '鸳鸯镇', '燕子砭', '仪征', '宜州', '兖州', '迤资', '羊者窝', '杨杖子', '镇安', '治安',
'招柏', '张百湾', '中川机场', '枝城', '子长', '诸城', '邹城', '赵城', '章党', '正定', '肇东', '照福铺', '章古台', '赵光', '中和', '中华门', '枝江北', '钟家村', '朱家沟', '紫荆关', '周家', '诸暨', '镇江南', '周家屯', '褚家湾', '湛江西', '朱家窑', '曾家坪子', '张兰', '镇赉', '枣林', '扎鲁特', '扎赉
诺尔西', '樟木头', '中牟', '中宁东', '中宁', '中宁南', '镇平', '漳平', '泽普', '枣强', '张桥', '章丘', '朱日和', '泽润里', '中山北', '樟树东', '珠斯
花', '中山', '柞水', '钟山', '樟树', '珠窝', '张维屯', '彰武', '棕溪', '钟祥', '资溪', '镇西', '张辛', '正镶白旗', '紫阳', '枣阳', '竹园坝', '张掖',
'镇远', '漳州东', '漳州', '壮志', '子洲', '中寨', '涿州', '咋子', '卓资山', '株洲西', '郑州西', '阿巴嘎旗', '阿尔山北', '阿勒泰', '安仁', '安顺西', '安图西', '安阳东', '博白', '八步', '栟茶', '保定东', '八方山', '白沟', '滨海', '滨海北', '宝鸡南', '北井子', '白马井', '宝清', '璧山', '白沙铺', '白
水县', '板塘', '本溪新城', '彬县', '宾阳', '白洋淀', '百宜', '白音华南', '巴中东', '滨州', '霸州西', '澄城', '城固北', '查干湖', '巢湖东', '从江', '
茶卡', '长临河', '茶陵南', '常平东', '常平南', '长庆桥', '长寿北', '长寿湖', '潮汕', '常山', '长沙西', '朝天', '长汀南', '长武', '长兴', '苍溪', '长
阳', '潮阳', '城子坦', '东安东', '德保', '都昌', '东岔', '东城南', '东戴河', '丹东西', '东二道河', '大丰', '大方南', '东港北', '大孤山', '东莞', '鼎
湖东', '鼎湖山', '道滘', '洞井', '垫江', '大苴', '大荔', '大朗镇', '大青沟', '德清', '砀山南', '大石头南', '当涂东', '大通西', '大旺', '定西北', '德
兴东', '德兴', '丹霞山', '大冶北', '都匀东', '东营南', '大余', '定州东', '端州', '大足南', '峨眉山', '阿房宫', '鄂州东', '防城港北', '凤城东', '富川', '繁昌西', '丰都', '涪陵北', '枫林', '富宁', '佛坪', '法启', '芙蓉南', '复盛', '抚松', '佛山西', '福山镇', '福田', '富源北', '抚远', '抚州东', '抚州', '高安', '广安南', '贵安', '高碑店东', '恭城', '藁城南', '贵定北', '葛店南', '贵定县', '广汉北', '高花', '革居', '关岭', '桂林西', '光明城', '广宁', '广宁寺', '广南县', '桂平', '弓棚子', '光山', '谷山', '观沙岭', '古田北', '广通北', '高台南', '古田会址', '贵阳北', '贵阳东', '高邑西', '惠安', '淮北北', '鹤壁东', '寒葱沟', '霍城', '珲春', '邯郸东', '惠东', '哈达铺', '海东西', '洪洞西', '哈尔滨北', '合肥北城', '合肥南', '黄冈', '黄冈东', '横沟
桥东', '黄冈西', '洪河', '怀化南', '黄河景区', '花湖', '惠环', '后湖', '怀集', '河口北', '黄流', '黄陵南', '鲘门', '虎门', '侯马西', '衡南', '淮南东', '合浦', '霍邱', '怀仁东', '华容东', '华容南', '黄石北', '黄山北', '衡水北', '贺胜桥东', '和硕', '花山南', '荷塘', '黄土店', '合阳北', '海阳北', '槐荫', '鄠邑', '花园口', '霍州东', '惠州南', '建安', '泾川', '景德镇北', '旌德', '尖峰', '近海', '蛟河西', '军粮城北', '将乐', '贾鲁河', '九郎山', '即
墨北', '剑门关', '建宁县北', '江宁', '江宁西', '建瓯西', '酒泉南', '句容西', '建水', '尖山', '界首市', '绩溪北', '介休东', '泾县', '靖西', '进贤南',
'江油北', '嘉峪关南', '简阳南', '金银潭', '靖宇', '金月湾', '缙云西', '晋中', '景州', '开封北', '开福寺', '开化', '凯里南', '库伦', '昆明南', '葵潭', '开阳', '隆安东', '来宾北', '灵璧', '寮步', '绿博园', '隆昌北', '乐昌东', '临城', '罗城', '陵城', '老城镇', '龙洞堡', '乐都南', '娄底南', '乐东', '
离堆公园', '陆丰', '龙丰', '禄丰南', '临汾西', '临高南', '麓谷', '滦河', '漯河西', '罗江东', '柳江', '利津南', '兰考南', '兰陵北', '龙里北', '沥林北', '醴陵东', '陇南', '梁平南', '礼泉', '灵石东', '乐山', '龙市', '溧水', '洛湾三江', '莱西北', '溧阳', '临邑', '柳园南', '鹿寨北', '阆中', '临泽南', '马鞍山东', '毛陈', '明港东', '民和南', '闵集', '马兰', '民乐', '弥勒', '玛纳斯', '牟平', '闽清北', '民权北', '眉山东', '庙山', '岷县', '门源', '暮云', '蒙自北', '孟庄', '蒙自', '南部', '南曹', '南充北', '南城', '南昌西', '宁东南', '宁东', '南芬北', '南丰', '南湖东', '内江北', '南江', '南江口', '南陵', '尼木', '南宁东', '南宁西', '南平北', '宁强南', '南雄', '纳雍', '南阳寨', '普安', '普安县', '屏边', '平坝南', '平昌', '普定', '平度', '皮口', '
盘龙城', '普宁', '平南南', '彭山北', '盘山', '坪上', '萍乡北', '濮阳', '鄱阳', '平遥古城', '平原东', '普者黑', '盘州', '彭州', '秦安', '青白江东', '
青川', '青岛北', '祁东', '青堆', '前锋', '曲靖北', '曲江', '青莲', '齐齐哈尔南', '清水北', '青神', '岐山', '庆盛', '清水县', '曲水县', '祁县东', '乾
县', '旗下营南', '祁阳', '全州南', '棋子湾', '仁布', '荣昌北', '荣成', '瑞昌西', '如东', '榕江', '日喀则', '饶平', '宋城路', '三道湖', '邵东', '三都
县', '胜芳', '双峰北', '商河', '泗洪', '四会', '石家庄东', '三江南', '三井子', '双流机场', '石林西', '沙岭子西', '双流西', '三明北', '嵩明', '树木岭', '苏尼特左旗', '山坡东', '石桥', '沈丘', '鄯善北', '狮山北', '三水北', '松山湖北', '狮山', '三水南', '韶山南', '三穗', '石梯', '汕尾', '歙县北', '绍兴北', '绍兴东', '泗县', '始兴', '泗阳', '双阳', '邵阳北', '松原北', '山阴', '深圳北', '神州', '深圳坪山', '石嘴山', '石柱县', '台安南', '桃村北', '
田东北', '土地堂东', '太谷西', '吐哈', '通海', '太和北', '天河机场', '天河街', '通化县', '同江', '铜陵北', '吐鲁番北', '泰宁', '铜仁南', '天水南', '
通渭', '田心东', '汤逊湖', '藤县', '太原南', '通远堡西', '通州', '文登东', '五府山', '威虎岭北', '威海北', '乌兰察布', '五龙背东', '乌龙泉南', '乌鲁
木齐', '五女山', '武胜', '无为', '瓦屋山', '闻喜西', '武义北', '武夷山北', '武夷山东', '婺源', '渭源', '万州北', '武陟', '梧州南', '兴安北', '许昌东', '项城', '新都东', '西丰', '先锋', '湘府路', '襄汾西', '孝感北', '孝感东', '西湖东', '新化南', '新晃西', '新津', '小金口', '辛集南', '新津南', '咸宁东', '咸宁南', '溆浦南', '西平西', '湘潭北', '邢台东', '西乌旗', '修武西', '萧县北', '新乡东', '新余北', '西阳村', '信阳东', '咸阳秦都', '仙游', '新
郑机场', '香樟路', '迎宾路', '永城北', '运城北', '永川东', '禹城东', '宜春', '岳池', '云东海', '姚渡', '云浮东', '永福南', '雨格', '洋河', '永济北',
'弋江', '于家堡', '延吉西', '永康南', '运粮河', '炎陵', '杨陵南', '伊敏', '郁南', '银瓶', '永寿', '阳朔', '云山', '玉山南', '银滩', '永泰', '鹰潭北', '烟台南', '伊通', '尤溪', '云霄', '宜兴', '玉溪', '阳信', '应县', '攸县南', '洋县西', '余姚北', '榆中', '诏安', '正定机场', '纸坊东', '准格尔', '庄
河北', '昭化', '织金北', '张家川', '芷江', '织金', '仲恺', '曾口', '左岭', '樟木头东', '驻马店西', '漳浦', '肇庆东', '庄桥', '昭山', '钟山西', '漳县', '资阳北', '张掖西', '资中北', '涿州东', '枣庄东', '卓资东', '郑州东', '株洲南']
code=['VAP', 'BOP', 'BJP', 'VNP', 'BXP', 'IZQ', 'CUW', 'CQW', 'CRW', 'GGQ', 'SHH', 'SNH', 'AOH', 'SXH', 'TBP', 'TJP', 'TIP', 'TXP', 'CCT', 'CET', 'CRT', 'ICW', 'CNW', 'CDW', 'CSQ', 'CWQ', 'FZS', 'FYS', 'GIW', 'GZQ', 'GXQ', 'HBB', 'VBB', 'VAB', 'HFH', 'HTH', 'NDC', 'HHC', 'KEQ', 'HMQ', 'VUQ', 'HGH', 'HZH', 'XHH', 'JNK', 'JAK', 'JGK', 'KMM', 'KXM', 'LSO', 'LVJ', 'LZJ', 'LAJ', 'NCG', 'NJH', 'NKH', 'NNZ', 'VVP', 'SJP', 'SYT', 'SBT', 'SDT',
'SOT', 'TBV', 'TDV', 'TYV', 'WHN', 'KNM', 'WMR', 'EAY', 'XAY', 'CAY', 'XNO', 'YIJ', 'ZZF', 'ART', 'AKY', 'ASR', 'AHX', 'AKR', 'APT', 'AQH', 'ASW', 'AST', 'AYF', 'BAB', 'BBH', 'BCT', 'BHZ', 'BEL', 'BAP', 'BJY', 'BJB', 'BKX', 'BIZ', 'HJL', 'BTT', 'BDC', 'BTC', 'BXR', 'BXT', 'BEC', 'BXJ', 'BZH', 'CBN', 'VGQ', 'CDP', 'CDT', 'CFD', 'CDG', 'CEH', 'CPP', 'CRG', 'CTT', 'CDB', 'CXK', 'COM', 'CXT', 'CBF', 'IYH', 'CZJ', 'CZH', 'CZQ', 'CZF', 'COP', 'CZZ', 'RNT', 'DCT', 'DUT', 'DFB', 'DMQ', 'DHD', 'DHJ', 'DHL', 'DHT', 'DJB', 'DFP', 'DDW', 'DFT', 'DKM', 'DLT', 'DNG', 'DZX', 'DOC', 'DQT', 'DTV', 'DPK', 'DUX', 'RYW', 'DOF', 'RXW', 'DZP', 'EJC', 'RLC', 'ESN', 'FES', 'FJQ', 'FLV', 'FLW', 'FRX', 'FET', 'FSQ', 'FXD', 'FYH', 'GRO', 'GHW', 'GJV', 'GBZ', 'GRX', 'GLZ', 'GXN', 'GSN', 'GNJ', 'GYW', 'GBQ', 'GZG', 'GLT', 'GBT', 'AUH', 'HRH', 'HMB', 'HVN', 'HBV', 'KCN', 'HCY', 'HDP', 'HDB', 'HGB', 'HTT', 'HEM', 'HJB', 'HHQ', 'HKN', 'HLD', 'HRX', 'HWD', 'HLB', 'HMV', 'HMR', 'HAH', 'HNB', 'EUH', 'HQM', 'HBP', 'HRP', 'OSN', 'HSY', 'HKH', 'HSN', 'HSP', 'HYQ',
'HIK', 'HXZ', 'HOY', 'HCQ', 'VAG', 'JAL', 'JBG', 'JCF', 'JJZ', 'JCG', 'JFF', 'JGX', 'JGG', 'JHL', 'RNH', 'JBH', 'JJG', 'JLL', 'JMN', 'JMB', 'JIK', 'JAC', 'JQJ', 'JUH', 'JIQ', 'JTL', 'JVJ', 'JXB', 'JRH', 'JGJ', 'JFW', 'JZD', 'JZT', 'JKP', 'KLR', 'KFF', 'KLV', 'KLW', 'KSR', 'KNH', 'KTR', 'KYT', 'UAH', 'LBF', 'UCH', 'LCW', 'LKZ', 'LCN', 'LCG', 'UTP', 'LDL', 'LDQ', 'LFV', 'LGP', 'LHC', 'LON', 'LWJ', 'UHP', 'LHM', 'LQL', 'LJL', 'LHV', 'LLG', 'LKV', 'UPP', 'UMW', 'LVV', 'LST', 'LWH', 'LXJ', 'LEQ', 'UEP', 'LYS', 'LYQ', 'LYF', 'UKH', 'LDF', 'LVK', 'LLF', 'DHR', 'LYD', 'LYL', 'LZX', 'LZZ', 'LZD', 'MCN', 'MDX', 'MDB', 'MRX', 'MGH', 'MHX', 'MVX', 'MDQ', 'MMZ', 'MSB', 'MJT', 'VAW', 'MYW', 'MOQ', 'MLX', 'NVH', 'NGH', 'NCB', 'NCW', 'NDZ', 'NMP', 'NFT', 'NHX', 'NGX', 'NJW', 'NPS', 'NUH', 'NFF', 'NZX', 'PEN', 'PVD', 'PIJ', 'POJ', 'PQP', 'PSQ', 'PXG', 'PXZ', 'PCW', 'PRW', 'QRN', 'QSW', 'QDK', 'QYP', 'QJM', 'QNW', 'QEB', 'QHX', 'QTB', 'QVV', 'QRS', 'QYS', 'QEH', 'RAZ', 'RQJ', 'RJG', 'RZK', 'SCB', 'SFB', 'SGQ', 'SHD', 'SHB', 'SFX', 'SXT', 'SLL',
'SMS', 'OMY',c 'SMF', 'ONY', 'NIW', 'SPT', 'SQF', 'SRG', 'SSQ', 'OAH', 'OTQ', 'SWS', 'OEP', 'SEQ', 'JUQ', 'SYQ', 'SNN', 'SSB', 'VYT', 'SZH', 'SZQ', 'OXH', 'SZN', 'SUV', 'OSQ', 'TBQ', 'TVX', 'TGY', 'TGP', 'TXX', 'THL', 'TLX', 'TFR', 'TLD', 'TLT', 'TPT', 'TML', 'RDQ', 'FUP', 'TFT', 'TAK', 'TSP', 'TSJ', 'TYT', 'TQT', 'UTH', 'TZW', 'TAP', 'WCB', 'WCN', 'WDT', 'WKK', 'WHH', 'WXC', 'WJT', 'WLW', 'WWT', 'WNY', 'WSM', 'WIT', 'WUJ', 'WWJ', 'WXH', 'WXR', 'WPB', 'WAS', 'WYY', 'WYW', 'WZZ', 'RZH', 'VRH', 'ECW', 'XCF', 'ENW', 'XFB', 'XGV', 'EUG', 'XHY', 'EFQ', 'XLQ', 'XTC', 'EXP', 'XKS', 'XMS', 'XBS', 'XST', 'ETW', 'XTG', 'XWM', 'XXF', 'XUN', 'XYY', 'XFN', 'XYT', 'VIH', 'XRZ', 'XUG', 'XCH', 'YWY', 'YBW', 'YWB', 'YBD', 'HAN', 'YCW', 'AFH', 'YCN', 'YNV', 'YCB', 'YCV', 'YBP', 'YCG', 'YET', 'YGW', 'YIV', 'YJL', 'YKT', 'YKX', 'YNY', 'YLZ', 'ALY', 'TWQ', 'YPB', 'YMR', 'YAY', 'YZW', 'YPV', 'YNP', 'YYV', 'YQB', 'AQP', 'NUW', 'YNG', 'AOP', 'YRT', 'YTG', 'YAK', 'YEX', 'ATP', 'YWH', 'YON', 'YXD', 'AEQ', 'YYQ', 'YUQ', 'AOQ', 'YLH', 'ZBK', 'ZDV', 'ZGW',
'ZHQ', 'ZIQ', 'ZJZ', 'ZJH', 'DIQ', 'ZKP', 'ZMP', 'ZKN', 'ZLC', 'ZTX', 'ZDN', 'ZVQ', 'ZIT', 'ZDW', 'ZWJ', 'ZYW', 'ZIW', 'ZEK', 'ZZW', 'ZZQ', 'ZFK', 'AAX', 'ACB', 'ADX', 'ARW', 'ADP', 'ADO', 'AGT', 'YED', 'AHP', 'PKQ', 'AJJ', 'ARH', 'AJB', 'AJD', 'PYW', 'AER', 'AYY', 'ALD', 'AUZ', 'ASX', 'ALN', 'JTX', 'AZM', 'APH', 'AXT', 'ATV', 'ASH', 'ATR', 'ATL', 'AXS', 'BWQ', 'BPW', 'BGV', 'BMH', 'BCR', 'BUP', 'BEP', 'BDP', 'BPP', 'ILP', 'BNN', 'BGM', 'BUT', 'BIY', 'BVC', 'BWH', 'BEY', 'BJJ', 'BJM', 'IBQ', 'BLQ', 'BBM', 'BSB', 'BTD', 'BKD', 'BKB', 'BAT', 'BRZ', 'BOR', 'BQC', 'BLX', 'BNB', 'BOZ', 'BLB', 'BLR', 'BND', 'AAP', 'BMD', 'BNM', 'BMB', 'BRP', 'RPD', 'BQP', 'BQB', 'BQL', 'BAY', 'BSY', 'BPM', 'BAL', 'BUM', 'FDC', 'BTQ', 'BZP', 'BYP', 'BHT', 'BXK', 'VXD', 'BYC', 'BYB', 'BIV', 'BAC', 'BID', 'BYT', 'BNJ', 'BCD', 'IEW', 'RMP', 'BVP', 'CIN', 'CBC', 'CEJ', 'CCM', 'CCP', 'CID', 'CAX', 'CGT', 'CEF', 'CGV', 'CGY', 'CAJ', 'CZB', 'WBW', 'CHB', 'CHZ', 'CKT', 'CHP', 'CIH', 'CJT', 'CJX', 'CAM', 'CJY', 'CLK', 'CYP', 'CUQ', 'CLP', 'CLT', 'CMB', 'CNJ',
'VBP', 'DAQ', 'CPM', 'CQB', 'CON', 'CSB', 'EFW', 'CSP', 'CST', 'CSL', 'CSC', 'CVT', 'CES', 'CPT', 'CQQ', 'CIP', 'CNZ', 'CXQ', 'CRP', 'CFH', 'CYK', 'CAL', 'CEK', 'CEX', 'CYL', 'CDD', 'CYD', 'CYF', 'CZL', 'CUH', 'ESH', 'CXH', 'CKQ', 'CVK', 'CFP', 'CWM', 'ICQ', 'CBP', 'DAG', 'RAT', 'DBJ', 'DBC', 'DBD', 'RBT', 'DYJ', 'DBB', 'RDT', 'DGJ', 'DVW', 'DDB', 'DKJ', 'DRD', 'DRX', 'UFQ', 'DGY', 'DIL', 'DMM', 'DTT', 'RGW', 'DGP', 'DHB', 'DHP', 'DQD', 'SOQ', 'DQH', 'DXT', 'DJT', 'DKB', 'DJL', 'DKP', 'RVD', 'DHO', 'DLC', 'DLB', 'DLD', 'DIC', 'DTX', 'DLV', 'DNC', 'GZT', 'DMD', 'DEP', 'DNF', 'DNZ', 'DPD', 'RPP', 'DPI', 'DVT', 'LFX', 'DQX', 'DML', 'DQB', 'MOH', 'RHX', 'DRQ', 'DKH', 'RWW', 'DWT', 'DPM', 'DSL', 'DYC', 'RZT', 'DBH', 'DQK', 'DGT', 'DBM', 'DTL', 'RUH', 'DNT', 'DRJ', 'DFJ', 'DWJ', 'DZZ', 'DFM', 'DXL', 'DXX', 'DSJ', 'DXM', 'DXG', 'DKV', 'DXV', 'RXP', 'DXD', 'DYH', 'DYW', 'DYX', 'DYN', 'EXH', 'IAW', 'DBV', 'DYV', 'EWH', 'RYV', 'DYZ', 'DJP', 'DZD', 'DTJ', 'DIP', 'DCH', 'DVQ', 'DNV', 'DFZ', 'DZV', 'DWV', 'ROP', 'DXP', 'DZY', 'DAP', 'RZP',
'EBW', 'RDP', 'RDX', 'EEC', 'RLD', 'ELA', 'EMW', 'RML', 'RYJ', 'ECN', 'FAS', 'FCG', 'FNG', 'FIH', 'FEM', 'FHX', 'FHR', 'FHT', 'FEV', 'FHH', 'FIB', 'FTT', 'FLJ', 'FTB', 'FZB', 'FNH', 'AKH', 'FNP', 'FQS', 'VMW', 'FSJ', 'FUQ', 'FSV', 'FST', 'FKP', 'FSZ', 'FTX', 'FYP', 'FDY', 'FXY', 'FEY', 'FXK', 'FUH', 'FAV', 'FBT', 'FYG', 'FYM', 'FYT', 'FYX', 'FBG', 'FZY', 'FZC', 'VZK', 'GFP', 'VJW', 'GBP', 'GBD', 'GDJ', 'GCN', 'GEP', 'GCV', 'GZB', 'GRH', 'GTW', 'IDW', 'GDV', 'GGZ', 'GVP', 'GGT', 'GGL', 'GGJ', 'GGP', 'GAX', 'GEX', 'GDT', 'GKT', 'GLJ', 'GEJ', 'GFM', 'GHT', 'GLF', 'VOW', 'GLP', 'GMK', 'GMC', 'GRT', 'GNT', 'GNM', 'GPF', 'GEY', 'GAG', 'GQD', 'GQY', 'GZD', 'GST', 'GSW', 'GSP', 'GSL', 'GSD', 'GXD', 'GTJ', 'GAY', 'GTS', 'GTP', 'KEP', 'GXG', 'GYH', 'GXF', 'GIP', 'GYF', 'GAW', 'GUJ', 'GYL', 'GYD', 'GZS', 'GNQ', 'GEH', 'GZY', 'GZJ', 'GSQ', 'GXT', 'GOT', 'GSS', 'GAT', 'AMH', 'HWN', 'HIH', 'VXN', 'HBL', 'HEB', 'HAF', 'XEG', 'VCQ', 'HCZ', 'HCN', 'HCT', 'WKW', 'HCJ', 'HCP', 'HXT', 'HGC', 'HDV', 'HFR', 'HFG', 'HXJ', 'HGP', 'HYM', 'IGW', 'HHT',
'VHD', 'HUD', 'HJJ', 'HJR', 'HFM', 'HIM', 'HJF', 'HJV', 'HJS', 'HJT', 'HDC', 'HXP', 'HJM', 'HKJ', 'KOH', 'HKG', 'HUB', 'HPD', 'HHB', 'HIT', 'HOB', 'HIB', 'ULY', 'HRB', 'VLB', 'HAT', 'HLL', 'HIL', 'HAX', 'VTJ', 'HLT', 'VEH', 'HYP', 'HHL', 'HNH', 'HMJ', 'VAQ', 'HZM', 'VQH', 'HEY', 'HRV', 'HRN', 'HDY', 'HDL', 'VSR', 'VSB', 'VSQ', 'HSQ', 'HOT', 'VCH', 'HHP', 'HSJ', 'HUT', 'HSO', 'HEQ', 'VSJ', 'HQB', 'VTK', 'VTR', 'VTQ', 'HZT', 'HWK', 'RWH', 'VXB', 'HYY', 'VHB', 'VTB', 'HTJ', 'VIX', 'HAY', 'HYK', 'HVQ', 'HUW', 'HQY', 'HGJ', 'WHW', 'VIQ', 'HUN', 'HNO', 'HYJ', 'VZH', 'HZZ', 'VON', 'HZV', 'VXQ', 'JRT', 'JIY', 'JBD', 'JEF', 'JCJ', 'JCK', 'JNV', 'JFD', 'JDB', 'JFP', 'JOB', 'UDH', 'JST', 'VGP', 'JHP', 'JHX', 'JHB', 'JHR', 'JIR', 'JHZ', 'AJH', 'VJD', 'JJS', 'JEY', 'JJB', 'JJW', 'JKT', 'JLJ', 'JMM', 'JWQ', 'JES', 'JOK', 'JNP', 'JVS', 'JPC', 'JQX', 'SSX', 'EGH', 'JSH', 'JCN', 'JRN', 'JVV', 'JSL', 'JET', 'JOP', 'JIB', 'EAH', 'JTB', 'JOM', 'JTJ', 'JNL', 'JWX', 'JUG', 'JKK', 'JUK', 'JXV', 'JXH', 'JJP', 'EPH', 'JXT', 'UEH', 'JRQ', 'JYS', 'JYW',
'JYK', 'JYZ', 'JYH', 'JYJ', 'SZL', 'JYF', 'JXJ', 'JZK', 'WEF', 'JZH', 'JEQ', 'JBN', 'JXK', 'JXP', 'JOD', 'JOF', 'JVP', 'JYD', 'KAT', 'KCR', 'KCP', 'KDX', 'KDT', 'KOB', 'KDC', 'KAW', 'KJB', 'KQX', 'KLC', 'KHR', 'KQL', 'KSH', 'KAB', 'KSB', 'KTT', 'KXZ', 'KAM', 'KHX', 'KXT', 'KZP', 'UBZ', 'LLT', 'LPF', 'LUQ', 'LCQ', 'UCP', 'UCK', 'LCK', 'LDY', 'LRC', 'LDO', 'LDP', 'LVP', 'LVM', 'LJP', 'LOP', 'LFP', 'UFD', 'LNB', 'LGM', 'LOM', 'LGJ', 'LGB', 'UFH', 'LXX', 'LHX', 'JID', 'LNL', 'KLH', 'LHP', 'UNP', 'LEX', 'LRT', 'UDT', 'LVT', 'LKS', 'UJH', 'LJB', 'LJW', 'LJZ', 'UJT', 'LJX', 'UJL', 'LHB', 'ULK', 'LIJ', 'LKF', 'LKB', 'LKQ', 'LAX', 'LAB', 'LRM', 'LLW', 'LWQ', 'LLB', 'UWZ', 'UAP', 'LMX', 'LMB', 'LMJ', 'UNG', 'UQW', 'LPM', 'LPP', 'UPJ', 'LPG', 'UQK', 'UQJ', 'UTW', 'LUM', 'UDQ', 'LGT', 'USP', 'LIQ', 'USH', 'LRN', 'LAF', 'LMK', 'LSV', 'LUL', 'LSG', 'LBT', 'LSD', 'LAS', 'LSB', 'LET', 'LTZ', 'LAR', 'LTP', 'LBM', 'LVZ', 'LTJ', 'VLJ', 'LWK', 'LRJ', 'LNJ', 'UXK', 'LXB', 'LXY', 'LXQ', 'LUG', 'LXK', 'LXC', 'UXP', 'LYY', 'LYK', 'LYT', 'UYK', 'LDD',
'UIH', 'LNF', 'LXL', 'LMH', 'LVS', 'LYX', 'LAQ', 'LYP', 'LPQ', 'LEJ', 'LZT', 'UAQ', 'LIW', 'LIZ', 'LZS', 'LZA', 'LEM', 'LQJ', 'MAH', 'MBY', 'MGY', 'MBN', 'MCF', 'MCL', 'MAP', 'MNF', 'KPM', 'MUQ', 'MOB', 'MDF', 'MRB', 'MGN', 'MHL', 'MHZ', 'MGB', 'MHQ', 'MQQ', 'MHB', 'MLZ', 'MLL', 'MLD', 'MLB', 'MID', 'MGM', 'MUD', 'MLQ', 'MNR', 'UGW', 'MPQ', 'MQB', 'MQS', 'MQF', 'MUT', 'MAB', 'MSW', 'MKW', 'MOM', 'MST', 'MEB', 'MVY', 'MVQ', 'MUP', 'MMW', 'MYS', 'MUR', 'MZJ', 'MEY', 'MFQ', 'NAB', 'NAT', 'NBK', 'NCK', 'NSP', 'NCZ', 'NES', 'NGP', 'NFP', 'NLT', 'NNH', 'NHH', 'NHS', 'NHJ', 'NHD', 'NVT', 'NJS', 'NJB', 'NJD', 'NKP', 'NKT', 'NNQ', 'NLD', 'NIR', 'ULZ', 'NLF', 'NMD', 'NMZ', 'NMX', 'NNS', 'NPZ', 'NQD', 'NQO', 'NQJ', 'NTT', 'NOQ', 'NWV', 'NWP', 'NEH', 'NXQ', 'NXF', 'NXT', 'NUP', 'NIP', 'NAF', 'NZT', 'PAW', 'PAL', 'PNO', 'PAJ', 'PZT', 'PEY', 'PCY', 'PDB', 'PRP', 'BFF', 'PXJ', 'PRT', 'PFB', 'PGL', 'PGM', 'PAM', 'PGZ', 'PHP', 'PHM', 'PHQ', 'PBD', 'PDP', 'PKT', 'PLT', 'PNT', 'PSB', 'PSW', 'PSR', 'PSL', 'PSV', 'PHW', 'PVT', 'PTM', 'PTS', 'PTW',
'PWT', 'PWV', 'PGV', 'POW', 'PWW', 'PYX', 'PYJ', 'PYV', 'PIK', 'PPJ', 'PYK', 'PYP', 'PZG', 'PJH', 'PZD', 'POD', 'PND', 'QOT', 'QAB', 'QQP', 'QRQ', 'QDM', 'QAK', 'QFT', 'QVP', 'QFK', 'QYQ', 'QTP', 'QUY', 'QIP', 'QHD', 'QHP', 'INH', 'QJZ', 'QJN', 'QJB', 'QJW', 'QBT', 'QNY', 'QZV', 'QLD', 'QLY', 'QLZ', 'QIB', 'QGH', 'QIH', 'QMP', 'QSB', 'QSN', 'QXQ', 'QUJ', 'QYH', 'QVH', 'QAT', 'QTJ', 'QWD', 'QWP', 'QRW', 'QXV', 'QXP', 'QXJ', 'QUV', 'QXC', 'QOY', 'QYF', 'QYL', 'QVQ', 'QYJ', 'QSJ', 'QBQ', 'QYT', 'QDZ', 'QRZ', 'QZK', 'RAH', 'RCW', 'RCG', 'RBH', 'RUQ', 'RQP', 'ROK', 'RSZ', 'RSD', 'RXZ', 'RVP', 'RYF', 'RHD', 'ROF', 'OBJ', 'SBP', 'AQW', 'OBP', 'ZWT', 'SBB', 'OCH', 'SWN', 'SCR', 'SCS', 'SMV', 'SCP', 'SCT', 'SCL', 'SDJ', 'ORQ', 'ODY', 'SIL', 'SXC', 'SEP', 'OUD', 'OJQ', 'OLH', 'OFB', 'STB', 'OTW', 'OKJ', 'OGC', 'SNQ', 'SVK', 'JBS', 'SED', 'SBM', 'SHP', 'SKT', 'SHC', 'VOP', 'SSD', 'SHL', 'OXP', 'OHD', 'OZW', 'SEL', 'SZR', 'SVP', 'ODP', 'SQH', 'OJJ', 'SJL', 'SJB', 'SUB', 'OJB', 'SML', 'SAH', 'SKD', 'OLK', 'IMH', 'SRP', 'SJJ', 'SOZ', 'SMM',
'SJD', 'OZL', 'SHM', 'SWT', 'OMP', 'SUR', 'SHJ', 'VLD', 'SPB', 'SLM', 'ZJD', 'SIB', 'SOL', 'LNM', 'SLQ', 'SLC', 'SNT', 'OLY', 'SLP', 'VFQ', 'SCF', 'OQH', 'OMQ', 'SXF', 'SYP', 'SOB', 'SBZ', 'PPT', 'SON', 'SFJ', 'SQM', 'SPF', 'SID', 'SXY', 'SQT', 'SRB', 'SRL', 'SQB', 'SWB', 'SSR', 'SJQ', 'OSK', 'SAD', 'SFT', 'SAT', 'SRD', 'SST', 'SSL', 'MZQ', 'SHX', 'SDH', 'OTB', 'SEV', 'SFM', 'SWP', 'SKB', 'SXR', 'OVH', 'SXZ', 'SAS', 'SOH', 'SXL', 'SXM', 'SXJ', 'FMH', 'SYB', 'SYV', 'OYP', 'SYJ', 'SPJ', 'OEJ', 'SOP', 'OYD', 'SYL', 'BDH', 'SAY', 'SUD', 'OYJ', 'SAJ', 'SND', 'OHH', 'SRH', 'BJQ', 'OZP', 'OZY', 'SZD', 'SZB', 'SNM', 'SIN', 'SEM', 'KAH', 'ITH', 'TMK', 'TID', 'TAJ', 'TBF', 'TBB', 'TTH', 'TCX', 'TZK', 'TCL', 'TCK', 'TRQ', 'TDZ', 'TGL', 'TGC', 'TOL', 'TGV', 'THX', 'THM', 'THF', 'THG', 'TKH', 'TIX', 'TNJ', 'TOT', 'PDQ', 'TZP', 'TKX', 'TMD', 'TEX', 'TJH', 'TFZ', 'TIZ', 'TLB', 'PXT', 'QSL', 'TMN', 'TNN', 'TLS', 'TRC', 'TCJ', 'TVT', 'TVW', 'TIT', 'TEB', 'TQX', 'TTK', 'TQL', 'TQJ', 'TCT', 'TAB', 'TIM', 'TUT', 'THB', 'TXJ', 'TSW', 'TCH', 'TRZ',
'TND', 'TYF', 'TIL', 'TYJ', 'TYB', 'TYP', 'TEK', 'TZH', 'TZJ', 'TXK', 'TZV', 'TEW', 'QWH', 'WBP', 'WAP', 'WVP', 'WUY', 'WEW', 'WCT', 'WEQ', 'WDB', 'WRB', 'WBK', 'WDL', 'WHP', 'WNZ', 'WVT', 'WRN', 'WDP', 'WHX', 'WFK', 'WFB', 'WUT', 'WXT', 'WGB', 'WGY', 'WGL', 'WGM', 'WVC', 'WHB', 'WHF', 'WCJ', 'WUB', 'WAM', 'WJP', 'WJL', 'WJJ', 'WQB', 'WKT', 'WBT', 'WLC', 'WEB', 'WVX', 'VHH', 'WLK', 'WQC', 'WSC', 'WLX', 'WBY', 'WRX', 'WNQ', 'WWG', 'WVY', 'WNJ', 'WPT', 'WUP', 'WQL', 'WWP', 'WSJ', 'WEV', 'WSP', 'WTP', 'WSV', 'WZJ', 'WVR', 'WGH', 'WVB', 'WXV', 'WVV', 'IFH', 'WXN', 'WYZ', 'WYB', 'RYH', 'WWB', 'WIM', 'WYC', 'WZL', 'WZY', 'WZV', 'WZB', 'WQP', 'WKD', 'EAM', 'XAZ', 'XAF', 'XAP', 'EBP', 'XLP', 'ECH', 'XCD', 'XEM', 'XRX', 'XCB', 'XCT', 'EDW', 'EJM', 'XMP', 'XEZ', 'XOD', 'EFG', 'XFV', 'XFW', 'EGG', 'XGN', 'XUJ', 'XIJ', 'XGJ', 'NBB', 'XXB', 'XIR', 'XWJ', 'EEP', 'XAX', 'XHB', 'EHQ', 'XHP', 'XEC', 'XYD', 'XYP', 'EKY', 'XJB', 'EJG', 'XJV', 'ENP', 'XJM', 'EKM', 'XJT', 'XTJ', 'XMT', 'EKB', 'EAQ', 'XNB', 'XDD', 'ELP', 'XPX', 'XLB', 'XLJ', 'XYB',
'GCT', 'XPH', 'XLD', 'XZB', 'XGT', 'XMD', 'XMB', 'XAT', 'XNV', 'XRN', 'ENQ', 'XNN', 'XAW', 'XPN', 'XPY', 'XPM', 'XOS', 'EPQ', 'XIW', 'XQB', 'XQD', 'XQJ', 'XRL', 'ESP', 'XSB', 'XZN', 'XSV', 'XSP', 'XIZ', 'XAM', 'XOB', 'XDT', 'XSJ', 'XTQ', 'XTP', 'XAN', 'EIP', 'XJQ', 'EPD', 'XWF', 'XSN', 'ENN', 'XQY', 'XXQ', 'XIF', 'XOV', 'XXM', 'XGQ', 'XZC', 'XXP', 'XDB', 'XUY', 'XBY', 'XWN', 'SNZ', 'XHM', 'EEQ', 'XFM', 'XYX', 'EXM', 'EIF', 'EJH', 'EYB', 'XZJ', 'UUH', 'XZX', 'XRP', 'XZT', 'XXV', 'XZD', 'XRD', 'ERP', 'YAC', 'YAX', 'YAS', 'YNB', 'YBB', 'YUD', 'YAB', 'YKM', 'AIH', 'YYB', 'YER', 'YKJ', 'YYY', 'YQQ', 'YIN', 'YHN', 'YCK', 'YEK', 'YNF', 'YAL', 'YPK', 'YAP', 'ACP', 'IXH', 'YCT', 'YDQ', 'YDJ', 'YDM', 'YGS', 'YGH', 'YDG', 'YAJ', 'IIQ', 'YYM', 'YRB', 'YOV', 'YIK', 'YOB', 'EVH', 'YHP', 'AEP', 'YHM', 'URH', 'YAM', 'AEW', 'YHG', 'AJP', 'YAT', 'YGJ', 'YJT', 'YIR', 'AFP', 'AZK', 'RFH', 'YGT', 'YJX', 'YLW', 'YSM', 'YDY', 'YLX', 'YLB', 'YSY', 'ALW', 'YLM', 'YLD', 'YQP', 'YUM', 'YMF', 'YVV', 'YXJ', 'YMN', 'YMM', 'YST', 'YNK', 'YVM', 'YNR', 'YZJ',
'ABM', 'YPP', 'UPH', 'YSR', 'AQK', 'YQT', 'YQV', 'YGP', 'YBF', 'YSJ', 'AUM', 'YSF', 'YUK', 'YSV', 'YVH', 'ASP', 'YSP', 'YAD', 'AIP', 'YSX', 'YUT', 'YIP', 'YTQ', 'YTZ', 'YSL', 'YUX', 'YWM', 'YHW', 'YOG', 'ACG', 'YXM', 'YIG', 'YYH', 'AFW', 'YIQ', 'ARP', 'YYL', 'YYJ', 'YZY', 'UZH', 'YSZ', 'YZK', 'YQM', 'AEM', 'YZD', 'ZEY', 'ZAD', 'ZBP', 'ZUP', 'ZJJ', 'ZCN', 'ZHY', 'ZQK', 'ZIK', 'ZCV', 'ZHT', 'ZDP', 'ZDB', 'ZFM', 'ZGD', 'ZGB', 'ZHX', 'VNH', 'ZIN', 'ZJY', 'ZUB', 'ZYP', 'ZOB', 'ZDH', 'ZEH', 'ZOD', 'CWJ', 'ZWQ', 'ZUJ', 'ZBW', 'ZLV', 'ZLT', 'ZIV', 'ZLD', 'ZXX', 'ZOQ', 'ZGF', 'ZDJ', 'VNJ', 'ZNJ', 'ZPF', 'ZPS', 'ZPR', 'ZVP', 'ZQY', 'ZTK', 'ZRC', 'ZLM', 'ZGQ', 'ZOG', 'ZHD', 'ZSQ', 'ZSY', 'ZSZ', 'ZSG', 'ZOP', 'ZWB', 'ZWD', 'ZOY', 'ZTN', 'ZXS', 'ZVT', 'ZIP', 'ZXC', 'ZVY', 'ZYN', 'ZAW', 'ZYJ', 'ZUW', 'GOS', 'ZUS', 'ZUX', 'ZZY', 'ZZM', 'ZXP', 'ZAL', 'ZZC', 'ZAQ', 'XPF', 'AQC', 'ARX', 'AUR', 'ARG', 'ASE', 'AXL', 'ADF', 'BBZ', 'BBE', 'FWH', 'BMP', 'FGQ', 'FEP', 'FHP', 'FCP', 'BBY', 'BRT', 'BFQ', 'BUB', 'FZW', 'BSN', 'BGY', 'NGQ', 'BVT', 'BXY',
'UKZ', 'FWP', 'FHW', 'FNC', 'BDE', 'BIK', 'FOP', 'CUY', 'CBY', 'VAT', 'GUH', 'KNW', 'CVO', 'FVH', 'CNG', 'FQQ', 'FPQ', 'CQJ', 'COW', 'CSE', 'CBQ', 'CSU', 'RXQ', 'CTE', 'CNS', 'CWY', 'CBH', 'CXE', 'CYN', 'CNQ', 'CWT', 'DCZ', 'RBZ', 'DCG', 'DCJ', 'IYQ', 'RDD', 'RWT', 'DRB', 'KRQ', 'DNE', 'RGT', 'RMT', 'RTQ', 'UWQ', 'NVQ', 'RRQ', 'FWQ', 'DJE', 'DIM', 'DNY', 'KOQ', 'DSD', 'DRH', 'PRH', 'DAL', 'OWH', 'DTO', 'WWQ', 'DNJ', 'DDG', 'DWG', 'IRQ', 'DBN', 'KJW', 'DOK', 'DYG', 'DOP', 'WZQ', 'FQW', 'IXW', 'EGY', 'EFN', 'FBZ', 'FDT', 'FDZ', 'PUH', 'FUW', 'FEW', 'FLN', 'FNM', 'FUY', 'FQE', 'KCQ', 'FAW', 'FSL', 'FOQ', 'FZQ', 'NZQ', 'FBM', 'FYB', 'FDG', 'FZG', 'GCG', 'VUW', 'GAE', 'GMP', 'GCZ', 'GUP', 'FMW', 'GNN', 'KIW', 'GVW', 'HGD', 'GEM', 'GLE', 'GEZ', 'IMQ', 'FBQ', 'GQT', 'GXM', 'GAZ', 'GPT', 'GUN', 'FFQ', 'FKQ', 'GBS', 'GPM', 'GAJ', 'STS', 'KQW', 'KEW', 'GNP', 'HNS', 'PLH', 'HFF', 'HKB', 'SER', 'HUL', 'HPP', 'KDQ', 'HDJ', 'HDO', 'HTV', 'HTB', 'COH', 'ENH', 'KGN', 'KAN', 'HNN', 'KXN', 'HPB', 'KAQ', 'HCF', 'KHN', 'KHQ', 'IHN', 'FAQ', 'HBM',
'KLQ', 'VLY', 'KMQ', 'IUQ', 'HPV', 'HNG', 'HOH', 'HVZ', 'FBH', 'HFV', 'HPN', 'KRN', 'KSN', 'NYH', 'IHP', 'HLN', 'VUR', 'KNN', 'KXQ', 'HKP', 'HTY', 'HEK', 'IYN', 'KXY', 'HYT', 'HWV', 'KNQ', 'JUL', 'JAJ', 'JDG', 'NSH', 'PFQ', 'JHD', 'JOL', 'JMP', 'JLS', 'JLF', 'KJQ', 'JVK', 'JME', 'JCS', 'JJH', 'OKH', 'JUS', 'JNJ', 'JWH', 'JSM', 'JPQ', 'JUN', 'NRH', 'JDV', 'LOH', 'JMZ', 'JXG', 'JBE', 'JBJ', 'JOW', 'JTN', 'JYL', 'PYQ', 'PYH', 'JZV', 'JEP', 'KBF', 'FLQ', 'KHU', 'QKW', 'KLD', 'KOM', 'KTQ', 'KVW', 'IDZ', 'UCZ', 'GMH', 'LTQ', 'LCF', 'NWW', 'ILQ', 'UUP', 'VCZ', 'LGK', 'ACQ', 'FVW', 'LVO', 'UOQ', 'UQQ', 'INW', 'LLQ', 'KFQ', 'LQM', 'LXV', 'KGQ', 'BNQ', 'UDP', 'LBN', 'IKW', 'UQZ', 'LNK', 'LUF', 'COK', 'KFW', 'KBQ', 'UKQ', 'INJ', 'LPE', 'LGY', 'UDV', 'IVW', 'LAG', 'LDH', 'KRW', 'LBK', 'LEH', 'LUK', 'LNR', 'LSZ', 'LZE', 'LDJ', 'OMH', 'MHN', 'MDN', 'MNO', 'MJN', 'MLR', 'MBJ', 'MLM', 'MSR', 'MBK', 'MBS', 'MIF', 'IUW', 'MSN', 'MXJ', 'MYO', 'KIQ', 'MBM', 'MZF', 'MZM', 'NBE', 'NEF', 'NCE', 'NDG', 'NXG', 'NDJ', 'NOJ', 'NUT', 'NFG', 'NDN', 'NKW',
'FIW', 'NDQ', 'LLH', 'NMO', 'NFZ', 'NXZ', 'NBS', 'NOY', 'NCQ', 'NYE', 'NYF', 'PAN', 'PUE', 'PBM', 'PBE', 'PCE', 'PGW', 'PAK', 'PUT', 'PNN', 'PEQ', 'PAZ', 'PPW', 'PUD', 'PSK', 'PBG', 'PYF', 'PYG', 'PDV', 'PUK', 'PZM', 'PAE', 'PMW', 'QGJ', 'QFW', 'QCE', 'QHK', 'QMQ', 'QET', 'QFB', 'QBM', 'QIM', 'QEW', 'QNB', 'QEJ', 'QVW', 'QAY', 'QSQ', 'QIJ', 'QSO', 'QGV', 'QBY', 'QNC', 'QWQ', 'QNZ', 'QZQ', 'RUO', 'RQW', 'RCK', 'RXG', 'RIH', 'RVW', 'RKO', 'RVQ', 'SFF', 'SDL', 'FIQ', 'KKW', 'SUPDL', 'FIQ', 'KKW', 'SUP', 'NFQ', 'SOK', 'GQH', 'AHQ', 'SXP', 'SWZ', 'OJT', 'IPW', 'SYM', 'IXP', 'IQW', 'SHS', 'SVM', 'FMQ', 'ONC', 'SBN', 'SQE', 'SQN', 'S', 'KSQ', 'RNQ', 'INQ',MR', 'NSQ', 'ARQ', 'KUQ', 'KSQ', 'RNQ', 'INQ', 'QHW', 'STE', 'OGQ', 'NPH', 'SLH', 'SSH', 'GPH', 'IPQ', 'MPH', 'OYT', 'OVQ', 'OCT', 'SNV', 'IOQ', 'SRQ', 'IFQ', 'QQJ', 'OSW', 'TAD', 'TOK', 'TBZ', 'TTN', 'TIV', 'THR', 'TAM', 'JYN', 'TJN', 'TEN', 'TXL', 'TJB', 'KXH', 'TAR', 'TNS', 'TNW', 'TIJ', 'TWJ', 'KQQ', 'THN', 'TAZ', 'TNV', 'TST', 'TOP', 'WGK', 'WFG', 'WBL', 'WHK', 'WPC', 'WMT', 'WFN', 'WAR', 'WET', 'WSE', 'IIH', 'WAH', 'WOV', 'WDH', 'WBS', 'WCS', 'WYG', 'WEJ', 'WZE', 'WIF', 'WBZ', 'XDZ', 'XVF', 'ERN', 'EWW', 'XFT', 'NQQ', 'FVQ', 'XTV', 'XJN', 'GDN', 'WDQ', 'EJQ', 'EWQ', 'IRW', 'NKQ', 'IJP', 'ITW', 'XKN', 'UNN', 'EMQ', 'EGQ', 'EDQ', 'EDP', 'XWC', 'EXF', 'QSH', 'EGF', 'XBG', 'XQF', 'OYN', 'XOY', 'XWS', 'EZF', 'FNQ', 'YFW', 'RGH', 'ABV', 'WMW', 'YSK', 'YEG', 'AWW', 'NAQ', 'AOJ', 'IXQ', 'YBZ', 'VTM', 'GTH', 'AJV', 'RVH', 'YKP', 'YXL', 'QUH', 'YEF', 'YAG', 'YEY', 'YMX', 'YKQ', 'KPQ', 'ASY', 'YCZ', 'KZQ', 'YGG', 'CTQ', 'YTS', 'YKG', 'YLK', 'YTL', 'YXS', 'YBS', 'YUH', 'AXM', 'YVK', 'YZV', 'YXG', 'YXY', 'CTH', 'IZJ', 'ZDS', 'ZHP', 'ZMN', 'ZEC', 'ZUT','ZHW', 'ZJE', 'ZIJ', 'ZPQ', 'IZW', 'KKQ', 'ZKE', 'ZSN', 'ZRQ', 'ZLN', 'ZCS', 'FCQ', 'ZQH', 'KWQ', 'ZAZ', 'ZXJ', 'FYW', 'ZEJ', 'WZW', 'ZAP', 'ZNK', 'ZDC', 'ZAF', 'KVQ']
def getCityName(code):
    """Return the city name registered for the 3-letter *code*.

    Positions in the module-level ``codes`` list correspond one-to-one
    with positions in ``cities``.

    Raises:
        ValueError: if *code* is not present in ``codes``.
    """
    return cities[codes.index(code)]
def getCode(city):
    """Return the 3-letter code registered for *city*.

    Positions in the module-level ``cities`` list correspond one-to-one
    with positions in ``codes``.

    Raises:
        ValueError: if *city* is not present in ``cities``.
    """
    index = cities.index(city)
    # BUG FIX: the original returned ``code[index]``, but no global named
    # ``code`` exists — the lookup table is ``codes`` (see getCityName),
    # so every call raised NameError.
    return codes[index]
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision.datasets import CIFAR10
import flash
from flash.core.data.utils import download_data
from flash.image import ImageClassificationData, ImageEmbedder

# Stage 1: wrap CIFAR10 in a Flash datamodule for self-supervised pretraining.
cifar_data = ImageClassificationData.from_datasets(
    train_dataset=CIFAR10(".", download=True),
    batch_size=16,
)

# Stage 2: configure the embedder to pretrain a ResNet encoder with the
# Barlow Twins self-supervised strategy.
model = ImageEmbedder(
    backbone="resnet",
    training_strategy="barlow_twins",
    head="simclr_head",
    pretraining_transform="barlow_twins_transform",
    training_strategy_kwargs={"latent_embedding_dim": 128},
    pretraining_transform_kwargs={"size_crops": [196]},
)

# Stage 3: pre-train the encoder. On GPU machines pass accelerator='ddp',
# e.g. flash.Trainer(max_epochs=3, gpus=1, accelerator='ddp').
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.fit(model, datamodule=cifar_data)

# Stage 4: persist the pretrained weights.
trainer.save_checkpoint("image_embedder_model.pt")

# Stage 5: fetch a downstream dataset and embed two of its images.
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
embeddings = model.predict(
    [
        "data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg",
        "data/hymenoptera_data/predict/2039585088_c6f47c592e.jpg",
    ]
)

# One embedding vector per input image, in the order supplied above.
print(embeddings)
|
from fly_behavior import FlyBehavior
class FlyWithWings(FlyBehavior):
    """Concrete flying strategy: announces that it is airborne."""

    def fly(self):
        # Same message as before; double quotes avoid escaping the apostrophe.
        print("I'm flying")
|
import base64
import inspect
class Private(object):
    """
    This class attempts to keep private information.
    Clearly this is python so we cannot actually achieve that aim.
    Any attacker could easily monkey patch out any of this code rendering it
    useless.
    The point of this is to make the code less likely to leak sensitive
    information for example via the logs, user notifications, displaying in
    i3bar.
    THIS IS NOT SECURE!

    Subclasses (see PrivateBase64 / PrivateHide below) supply ``_decode``,
    which turns ``_encoded`` into the real ``_value``.
    """
    def __init__(self, encoded, module_name):
        # Every assignment here is vetted by __setattr__, which only
        # honours writes originating from inside this module.
        self._decoded = False
        self._encoded = encoded
        # keep only the first word of the owning module's name
        self._module_name = module_name.split(' ')[0]
        self._private = u'***' # this is used when the user is untrusted
        self._value = u'encrypted'  # placeholder until _decode() runs
        # Try to decrypt data if possible
        self._decrypt()
    def _decrypt(self, key=None):
        """
        Decode the stored value (at most once) via the subclass ``_decode``.
        """
        if not self._decoded:
            self._decode(key)
    def __setattr__(self, name, value):
        """
        Do not allow this object to be updated outside of this module.

        Writes from any other module are silently ignored (returns None).
        """
        stack = inspect.stack()
        # NOTE(review): inspect.getmodule can return None for some frames
        # (e.g. exec'd code), which would raise AttributeError here — confirm.
        if inspect.getmodule(stack[1][0]).__name__ != __name__:
            return
        return object.__setattr__(self, name, value)
    def __getattribute__(self, name):
        """
        Check if user can access this attribute.

        Returns None (rather than raising) for disallowed access.
        """
        # allowed by all users
        if name in ['_decrypt', '_decode']:
            return object.__getattribute__(self, name)
        # allow internal calls: public names are always readable; private
        # names only when read from this module inside _catch or _decode
        # (the caller's function name is stack[1][3]).
        stack = inspect.stack()
        state = ((not name.startswith('_')) or (
            inspect.getmodule(stack[1][0]).__name__ == __name__ and
            stack[1][3] in ['_catch', '_decode']))
        if state:
            return object.__getattribute__(self, name)
        return None
def catch_factory(attr):
    """
    Factory returning a catch function that proxies the str method *attr*.

    The returned ``_catch`` is installed on Private (see the loop below)
    so that calling e.g. ``private.upper()`` walks the call stack and
    decides whether to operate on the real value or the '***' stand-in.
    """
    def _catch(s, *args, **kw):
        """
        This is used to catch and process all calls.

        *s* is the Private instance; the decision which value to expose
        is made from the calling module/function found via inspect.
        """
        def process(value):
            """
            return the actual value after processing
            """
            if attr.startswith('__'):
                # __repr__, __str__ etc — dispatch through the value itself
                return getattr(value, attr)(*args, **kw)
            else:
                # upper, lower etc — call the unbound str method explicitly
                return getattr(u''.__class__, attr)(value, *args, **kw)
        stack = inspect.stack()
        mod = inspect.getmodule(stack[1][0])
        # We are called from the owning module so allow
        if mod.__name__.split('.')[-1] == s._module_name:
            return process(s._value)
        # very shallow calling no stack — not enough context to trust
        if len(stack) < 3:
            return process(s._private)
        # Check if this is an internal or external module. We need to allow
        # calls to modules like requests etc
        remote = not inspect.getmodule(stack[2][0]).__name__.startswith('py3status')
        valid = False
        # go through the stack to see how we came through the code
        for frame in stack[2:]:
            mod = inspect.getmodule(frame[0])
            if remote and mod.__name__.split('.')[-1] == s._module_name:
                # the call to an external module started in the correct module
                # so allow this usage
                valid = True
                break
            if mod.__name__ == 'py3status.py3' and frame[3] == 'request':
                # Py3.request has special needs due so it is allowed to access
                # private variables.
                valid = True
                break
            if mod.__name__.startswith('py3status'):
                # We were somewhere else in py3status than the module, maybe we
                # are doing some logging. Prevent usage
                return process(s._private)
        if valid:
            return process(s._value)
        # default: untrusted caller sees the masked value
        return process(s._private)
    return _catch
# Mirror every callable attribute of the unicode/str type onto Private so
# its instances can masquerade as strings. Each mirrored method is a
# _catch wrapper (from catch_factory) that decides per-call whether the
# caller may see the real value. A few special methods must keep
# Private's own implementations and are therefore skipped.
EXCLUDE = ['__init__', '__getattribute__', '__new__', '__setattr__', '__init_subclass__']
for method_name in dir(u''):
    is_reserved = method_name.startswith('__') and method_name in EXCLUDE
    if is_reserved:
        continue
    if '__call__' in dir(getattr(u'', method_name)):
        setattr(Private, method_name, catch_factory(method_name))
class PrivateBase64(Private):
    """
    Private value stored as base64.

    This is obfuscation, not encryption — it merely keeps the plain text
    out of casual view (configs, logs).
    """
    def _decode(self, key):
        # Nothing to decode until a payload has been supplied.
        if self._encoded is None:
            return
        try:
            self._value = base64.b64decode(self._encoded).decode('utf-8')
        except Exception:
            # Invalid base64 or non-UTF-8 payload: expose a marker
            # instead of raising during construction.
            self._value = 'Error'
        self._decoded = True
class PrivateHide(Private):
    """
    Keeps the value verbatim — no encoding at all.

    The Private machinery still stops it from showing up in the log
    files, i3bar, notifications etc.
    """
    def _decode(self, key):
        # Only mark as decoded once a real payload exists.
        if self._encoded is not None:
            self._value = self._encoded
            self._decoded = True
if __name__ == '__main__':
    # Smoke test: a value owned by '__main__' is readable from here,
    # while one owned by a different module ('xxx') only ever shows the
    # masked '***' stand-in. Output order matches the original demo.
    for owner in ('__main__', 'xxx'):
        secret = PrivateHide('test', owner)
        print(secret)
        print(secret.upper())
        print(secret.split('e'))
|
import compas_rrc as rrc

if __name__ == "__main__":
    # Bring up the ROS bridge connection; the ABB client rides on top of it.
    ros_client = rrc.RosClient()
    ros_client.run()

    # Attach to the robot controller task exposed at /rob1.
    abb_client = rrc.AbbClient(ros_client, "/rob1")
    print("Connected.")

    # Ask the controller to stop the motion task. DONE feedback level
    # means the returned future resolves once the stop has executed.
    stop_future = abb_client.send(rrc.Stop(feedback_level=rrc.FeedbackLevel.DONE))

    # Other work could run here while the robot comes to a stop.
    print("Press Play on the FlexPendant to continue.")

    # Block until the controller acknowledges the instruction.
    feedback = stop_future.result()
    print("Feedback = ", feedback)

    print("Finished")

    # Tear down the ROS connection.
    ros_client.close()
|
# ASCII-art sword pointing left; the literal's leading newline keeps the
# first printed line blank so the art lines up when displayed.
sword_left='''
     .
;-{<>===>
`'''
# Mirrored sword pointing right; note it ends with an acute accent (´)
# where the left-facing one used a backtick (`).
sword_right='''
   .
<===<>}-;
´'''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.