| hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24, ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24, ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24, ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa37134ef47073b2dedc4aa1d0534abbd7e59f38 | 1,054 | py | Python | umd/products/ooi.py | egi-qc/umd-verification | 328e875f9633c9e602e9eea61d2590def373098e | ["Apache-2.0"] | 1 | 2019-10-31T10:41:37.000Z | 2019-10-31T10:41:37.000Z | umd/products/ooi.py | egi-qc/umd-verification | 328e875f9633c9e602e9eea61d2590def373098e | ["Apache-2.0"] | 12 | 2015-06-04T12:08:18.000Z | 2018-06-05T09:54:58.000Z | umd/products/ooi.py | egi-qc/umd-verification | 328e875f9633c9e602e9eea61d2590def373098e | ["Apache-2.0"] | 3 | 2015-09-15T13:15:50.000Z | 2018-04-26T15:10:24.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umd import base
from umd.base.configure.puppet import PuppetConfig
from umd import config
class OOIDeploy(base.Deploy):
def pre_config(self):
os_release = config.CFG["openstack_release"]
self.cfgtool.extra_vars = "ooi::openstack_version: %s" % os_release
ooi = OOIDeploy(
name="ooi",
doc="OCCI OpenStack Interface.",
cfgtool=PuppetConfig(
manifest="ooi.pp",
module=("git://github.com/egi-qc/puppet-ooi.git", "umd"),
),
qc_specific_id="ooi",
)
| 31.939394 | 75 | 0.717268 |
f3f3cbf03d1c4d37cf25849591f826dfbfe4e91a | 230 | py | Python | pacote-download/d021 - tocar um MP3.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | ["MIT"] | null | null | null | pacote-download/d021 - tocar um MP3.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | ["MIT"] | null | null | null | pacote-download/d021 - tocar um MP3.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | ["MIT"] | null | null | null |
''' 021 Write a Python program that opens and plays the audio of an MP3 file '''
import pygame
pygame.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
pygame.event.wait()
# It did not work because pygame was not installed; a fix is sketched below.
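# A minimal follow-up sketch (assuming pygame is actually installed and the
# play() call above succeeded): keep the script alive until playback ends,
# because pygame.event.wait() alone returns as soon as any event arrives.
clock = pygame.time.Clock()
while pygame.mixer.music.get_busy():  # True while the track is still playing
    clock.tick(10)  # poll roughly ten times per second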
| 23 | 80 | 0.734783 |
b04559e1b287a6936e513a3fe23f7ead72ce6604 | 6,739 | py | Python | srunner/scenarios/signalized_junction_left_turn.py | ll7/scenario_runner | 277c570b0bc046ef1f9936d3b633d665d4a79197 | ["MIT"] | null | null | null | srunner/scenarios/signalized_junction_left_turn.py | ll7/scenario_runner | 277c570b0bc046ef1f9936d3b633d665d4a79197 | ["MIT"] | null | null | null | srunner/scenarios/signalized_junction_left_turn.py | ll7/scenario_runner | 277c570b0bc046ef1f9936d3b633d665d4a79197 | ["MIT"] | 1 | 2020-05-28T11:41:29.000Z | 2020-05-28T11:41:29.000Z |
#!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Collection of traffic scenarios where the ego vehicle (hero)
is making a left turn
"""
from six.moves.queue import Queue # pylint: disable=relative-import
import py_trees
import carla
from agents.navigation.local_planner import RoadOption
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import (ActorTransformSetter,
ActorDestroy,
ActorSource,
ActorSink,
WaypointFollower)
from srunner.scenariomanager.scenarioatomics.atomic_criteria import CollisionTest
from srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import DriveDistance
from srunner.scenarios.basic_scenario import BasicScenario
from srunner.tools.scenario_helper import generate_target_waypoint
class SignalizedJunctionLeftTurn(BasicScenario):
"""
Implementation class for Hero
Vehicle turning left at signalized junction scenario,
Traffic Scenario 08.
This is a single ego vehicle scenario
"""
timeout = 80 # Timeout of scenario in seconds
def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True,
timeout=80):
"""
Setup all relevant parameters and create scenario
"""
self._world = world
self._map = CarlaDataProvider.get_map()
self._target_vel = 6.9
self._brake_value = 0.5
self._ego_distance = 110
self._traffic_light = None
self._other_actor_transform = None
self._blackboard_queue_name = 'SignalizedJunctionLeftTurn/actor_flow_queue'
self._queue = py_trees.blackboard.Blackboard().set(self._blackboard_queue_name, Queue())
self._initialized = True
super(SignalizedJunctionLeftTurn, self).__init__("TurnLeftAtSignalizedJunction",
ego_vehicles,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
self._traffic_light = CarlaDataProvider.get_next_traffic_light(self.ego_vehicles[0], False)
traffic_light_other = CarlaDataProvider.get_next_traffic_light(self.other_actors[0], False)
if self._traffic_light is None or traffic_light_other is None:
raise RuntimeError("No traffic light for the given location found")
self._traffic_light.set_state(carla.TrafficLightState.Green)
self._traffic_light.set_green_time(self.timeout)
#set the traffic light in front of the ego vehicle on red light
#self._traffic_light.set_state(carla.TrafficLightState.Red)
#self._traffic_light.set_red_time(self.timeout)
# other vehicle's traffic light
traffic_light_other.set_state(carla.TrafficLightState.Green)
traffic_light_other.set_green_time(self.timeout)
def _initialize_actors(self, config):
"""
Custom initialization
"""
self._other_actor_transform = config.other_actors[0].transform
first_vehicle_transform = carla.Transform(
carla.Location(config.other_actors[0].transform.location.x,
config.other_actors[0].transform.location.y,
config.other_actors[0].transform.location.z - 500),
config.other_actors[0].transform.rotation)
first_vehicle = CarlaDataProvider.request_new_actor(config.other_actors[0].model, self._other_actor_transform)
first_vehicle.set_transform(first_vehicle_transform)
first_vehicle.set_simulate_physics(enabled=False)
self.other_actors.append(first_vehicle)
def _create_behavior(self):
"""
Hero vehicle is turning left in an urban area,
at a signalized intersection, while another actor is coming straight.
The hero actor may turn left either before the other actor
passes the intersection or after it, without any collision.
After 80 seconds, a timeout stops the scenario.
"""
sequence = py_trees.composites.Sequence("Sequence Behavior")
# Selecting straight path at intersection
target_waypoint = generate_target_waypoint(
CarlaDataProvider.get_map().get_waypoint(self.other_actors[0].get_location()), 0)
# Generating waypoint list till next intersection
plan = []
wp_choice = target_waypoint.next(1.0)
while not wp_choice[0].is_intersection:
target_waypoint = wp_choice[0]
plan.append((target_waypoint, RoadOption.LANEFOLLOW))
wp_choice = target_waypoint.next(1.0)
# adding flow of actors
actor_source = ActorSource(
['vehicle.tesla.model3', 'vehicle.audi.tt'],
self._other_actor_transform, 15, self._blackboard_queue_name)
# destroying flow of actors
actor_sink = ActorSink(plan[-1][0].transform.location, 10)
# follow waypoints until the next intersection
move_actor = WaypointFollower(self.other_actors[0], self._target_vel, plan=plan,
blackboard_queue_name=self._blackboard_queue_name, avoid_collision=True)
# wait
wait = DriveDistance(self.ego_vehicles[0], self._ego_distance)
# Behavior tree
root = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
root.add_child(wait)
root.add_child(actor_source)
root.add_child(actor_sink)
root.add_child(move_actor)
sequence.add_child(ActorTransformSetter(self.other_actors[0], self._other_actor_transform))
sequence.add_child(root)
sequence.add_child(ActorDestroy(self.other_actors[0]))
return sequence
def _create_test_criteria(self):
"""
A list of all test criteria is created, which is later used
in the parallel behavior tree.
"""
criteria = []
collision_criteria = CollisionTest(self.ego_vehicles[0])
criteria.append(collision_criteria)
return criteria
def __del__(self):
self._traffic_light = None
self.remove_all_actors()
| 43.75974 | 118 | 0.648316 |
0e0b6a171029ca1525374b7136351dd271e11725 | 28,691 | py | Python | tests/workflow/test_time_series_extractor.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | ["Apache-2.0"] | 1 | 2021-09-17T01:14:57.000Z | 2021-09-17T01:14:57.000Z | tests/workflow/test_time_series_extractor.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | ["Apache-2.0"] | null | null | null | tests/workflow/test_time_series_extractor.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | ["Apache-2.0"] | 1 | 2021-08-09T14:22:50.000Z | 2021-08-09T14:22:50.000Z |
"""Tests for TimeSeriesExtractor.
"""
import os
import unittest
from unittest import main
import pandas as pd
from sqlalchemy import Column
from sqlalchemy.types import Float, Integer, String
from soam.constants import TIMESTAMP_COL
from soam.data_models import AbstractIDBase, AbstractTimeSeriesTable
from soam.workflow import TimeSeriesExtractor
from tests.db_test_case import TEST_DB_CONNSTR, PgTestCase
class ConcreteTimeSeriesTable(AbstractTimeSeriesTable):
"""Creates the Concrete Time Series Table Object."""
__tablename__ = "test_data"
# We should really be using the timestamp inherited from AbstractTimeSeriesTable.
game = Column(String(64))
country = Column(String(2))
ad_network = Column(String(64))
ad_type = Column(String(64))
placement_id = Column(String(256))
waterfall_position = Column(Integer())
total_count_view_opportunity = Column(Integer())
count_cache_request = Column(Integer())
count_cache_success = Column(Integer())
count_view_start = Column(Integer())
count_view_click = Column(Integer())
requests = Column(Integer())
impressions = Column(Integer())
clicks = Column(Integer())
revenue = Column(Float())
# TODO: Add unique constraint for consistency's sake.
# https://gitlab.com/mutt_data/tfg-adsplash/-/blob/master/adsplash/store/dataset.py#L442
class ConcreteAdNetworkJoinTimeSeriesTable(AbstractIDBase):
"""Creates the Concrete Ad Network Object."""
__tablename__ = "test_ad_network_join_data"
ad_network = Column(String(64))
ad_network_group = Column(String(64))
class ConcretePlacementIdJoinTimeSeriesTable(AbstractIDBase):
"""Creates the Concrete Placement Id Object."""
__tablename__ = "test_placement_id_join_data"
placement_id = Column(String(256))
placement_id_group = Column(String(64))
column_mappings = {
"opportunities": "total_count_view_opportunity AS opportunities",
"ecpm": """COALESCE(
1000.0 * revenue::float8 / NULLIF(impressions, 0), 0) AS ecpm""",
"share": """COALESCE(
impressions::float8
/ NULLIF(total_count_view_opportunity, 0), 0) AS share""",
"quantile": """
COALESCE(1 - (
SUM(impressions)
OVER (
PARTITION BY %(quantile_partition)s
ORDER BY
timestamp,
waterfall_position,
revenue / NULLIF(impressions, 0)
DESC
ROWS BETWEEN unbounded preceding AND current row)
)::float8 / NULLIF(total_count_view_opportunity, 0), 0) AS quantile
""",
}
aggregated_column_mappings = {
"opportunities": """
MAX(total_count_view_opportunity)::float8
AS opportunities
""",
"total_count_view_opportunity": """
MAX(total_count_view_opportunity)::float8
AS total_count_view_opportunity
""",
"count_cache_request": """
SUM(count_cache_request) AS count_cache_request
""",
"count_cache_success": """
SUM(count_cache_success) AS count_cache_success
""",
"count_view_start": "SUM(count_view_start) AS count_view_start",
"count_view_click": "SUM(count_view_click) AS count_view_click",
"requests": "SUM(requests) AS requests",
"impressions": "SUM(impressions) AS impressions",
"clicks": "SUM(clicks) AS clicks",
"revenue": "SUM(revenue) AS revenue",
"ecpm": """COALESCE(
1000.0 * SUM(revenue)::float8
/ NULLIF(SUM(impressions), 0), 0) AS ecpm""",
"share": """COALESCE(
SUM(impressions)::float8
/ NULLIF(MAX(total_count_view_opportunity), 0), 0) AS share""",
"quantile": """
COALESCE(1 - (
SUM(SUM(impressions))
OVER (
PARTITION BY %(quantile_partition)s
ORDER BY
timestamp,
waterfall_position,
SUM(revenue) / NULLIF(SUM(impressions), 0) DESC
ROWS BETWEEN unbounded preceding AND current row)
)::float8 / NULLIF(MAX(total_count_view_opportunity), 0)::float8,
0) AS quantile
""",
}
@unittest.skipIf(not os.getenv(TEST_DB_CONNSTR), f"{TEST_DB_CONNSTR} is not set")
class TestDatasetStore(PgTestCase):
"""Test dataset store object."""
def _test_load(
self,
columns,
dimensions,
dimensions_values,
start_date,
end_date,
order_by,
expected_values,
extra_where_conditions=None,
extra_having_conditions=None,
inner_join=None,
table_mapping=None,
):
df = self.time_series_extractor.extract(
build_query_kwargs=dict(
columns=columns,
dimensions=dimensions,
dimensions_values=dimensions_values,
start_date=start_date,
end_date=end_date,
order_by=order_by,
extra_where_conditions=extra_where_conditions,
extra_having_conditions=extra_having_conditions,
column_mappings=column_mappings,
aggregated_column_mappings=aggregated_column_mappings,
inner_join=inner_join,
table_mapping=table_mapping,
)
)
# Fix for backwards compatibility with the original tests.
if TIMESTAMP_COL in df.columns and not df.empty:
df[TIMESTAMP_COL] = df[TIMESTAMP_COL].dt.strftime("%Y-%m-%d")
self.assertTrue(isinstance(df, pd.DataFrame))
columns = [c_name.split(".")[-1] for c_name in columns]
self.assertEqual(df.columns.tolist(), columns)
self.assertEqual(sorted(df.values.tolist()), sorted(expected_values))
def test_load_basic_columns_order_by(self):
columns = [TIMESTAMP_COL, "opportunities", "impressions", "revenue"]
values = [
["2019-09-01", 1000, 100, 20],
["2019-09-01", 1000, 200, 30],
["2019-09-01", 1000, 300, 40],
["2019-09-02", 300, 30, 6],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_join_load_basic_columns_order_by(self):
columns = [TIMESTAMP_COL, "opportunities", "tjd.ad_network", "ad_network_group"]
values = [
['2019-09-01', 1000, 'source1', 'source_group_B'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-02', 300, 'source2', 'source_group_A'],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "tjd.ad_network"],
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
"tjd",
"tjd.ad_network = test_data.ad_network",
)
],
)
def test_join_load_basic_columns_order_by_no_alias(self):
columns = [
TIMESTAMP_COL,
"opportunities",
"test_ad_network_join_data.ad_network",
"ad_network_group",
]
values = [
['2019-09-01', 1000, 'source1', 'source_group_B'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-02', 300, 'source2', 'source_group_A'],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "test_ad_network_join_data.ad_network"],
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
None,
"test_ad_network_join_data.ad_network = test_data.ad_network",
)
],
)
def test_join_tables_with_alias(self):
columns = [
TIMESTAMP_COL,
"opportunities",
"b.ad_network",
"ad_network_group",
]
values = [
['2019-09-01', 1000, 'source1', 'source_group_B'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-01', 1000, 'source2', 'source_group_A'],
['2019-09-02', 300, 'source2', 'source_group_A'],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=None,
expected_values=values,
table_mapping='a',
inner_join=[
("test_ad_network_join_data", 'b', "b.ad_network = a.ad_network",)
],
)
def test_multiple_join_load_basic_columns_order_by(self):
columns = [
TIMESTAMP_COL,
"opportunities",
"tjd.ad_network",
"ad_network_group",
"tpi.placement_id",
"placement_id_group",
]
values = [
['2019-09-01', 1000, 'source1', 'source_group_B', 'z', 'placement_group_1'],
['2019-09-01', 1000, 'source2', 'source_group_A', 'b', 'placement_group_2'],
['2019-09-01', 1000, 'source2', 'source_group_A', 'a', 'placement_group_1'],
['2019-09-02', 300, 'source2', 'source_group_A', 'a', 'placement_group_1'],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "tjd.ad_network"],
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
"tjd",
"tjd.ad_network = test_data.ad_network",
),
(
"test_placement_id_join_data",
"tpi",
"tpi.placement_id = test_data.placement_id",
),
],
)
def test_join_aggregation_load_basic_columns_no_alias(self):
columns = ["opportunities", "ad_network_group"]
values = [[1000.0, 'source_group_A'], [1000.0, 'source_group_B']]
self._test_load(
columns=columns,
dimensions=["ad_network_group"],
dimensions_values=None,
start_date=None,
end_date=None,
order_by=None,
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
None,
"test_ad_network_join_data.ad_network = test_data.ad_network",
)
],
)
def test_multiple_join_aggregation_basic_columns_order_by(self):
columns = [
"opportunities",
"ad_network_group",
"placement_id_group",
]
values = [
[1000.0, 'source_group_B', 'placement_group_1'],
[1000.0, 'source_group_A', 'placement_group_1'],
[1000.0, 'source_group_A', 'placement_group_2'],
]
self._test_load(
columns=columns,
dimensions=["ad_network_group", "placement_id_group"],
dimensions_values=None,
start_date=None,
end_date=None,
order_by=None,
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
"tjd",
"tjd.ad_network = test_data.ad_network",
),
(
"test_placement_id_join_data",
"tpi",
"tpi.placement_id = test_data.placement_id",
),
],
)
def test_multiple_join_aggregation_basic_columns_filtered(self):
columns = [
"opportunities",
"ad_network_group",
"placement_id_group",
]
values = [
[1000.0, 'source_group_A', 'placement_group_1'],
[1000.0, 'source_group_A', 'placement_group_2'],
]
self._test_load(
columns=columns,
dimensions=["ad_network_group", "placement_id_group"],
dimensions_values=['source_group_A', None],
start_date=None,
end_date=None,
order_by=None,
expected_values=values,
inner_join=[
(
"test_ad_network_join_data",
"tjd",
"tjd.ad_network = test_data.ad_network",
),
(
"test_placement_id_join_data",
"tpi",
"tpi.placement_id = test_data.placement_id",
),
],
)
def test_load_basic_columns_aggregation_order_by(self):
columns = ["opportunities", "impressions", "revenue"]
values = [
[1000, 600, 90],
[300, 30, 6],
]
self._test_load(
columns=columns,
dimensions=[TIMESTAMP_COL],
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL],
expected_values=values,
)
# def test_load_composite_columns(self):
# columns = [TIMESTAMP_COL, "ecpm", "share", "quantile"]
# values = [
# ["2019-09-01", 1e3 * 40 / 300, 300 / 1000, 0.40],
# ["2019-09-01", 1e3 * 30 / 200, 200 / 1000, 0.70],
# ["2019-09-01", 1e3 * 20 / 100, 100 / 1000, 0.90],
# ["2019-09-02", 1e3 * 6 / 30, 30 / 300, 0.90],
# ]
# self._test_load(
# columns=columns,
# dimensions=None,
# dimensions_values=None,
# start_date=None,
# end_date=None,
# order_by=[TIMESTAMP_COL, "ecpm"],
# expected_values=values,
# )
# def test_load_composite_columns_aggregation(self):
# columns = [TIMESTAMP_COL, "waterfall_position", "ecpm", "share", "quantile"]
# values = [
# ["2019-09-01", 0, 1e3 * 20 / 100, 100 / 1000, 0.90],
# ["2019-09-01", 1, 1e3 * 30 / 200, 200 / 1000, 0.70],
# ["2019-09-01", 2, 1e3 * 40 / 300, 300 / 1000, 0.40],
# ["2019-09-02", 0, 1e3 * 6 / 30, 30 / 300, 0.90],
# ]
# self._test_load(
# columns=columns,
# dimensions=[TIMESTAMP_COL, "waterfall_position"],
# dimensions_values=None,
# start_date=None,
# end_date=None,
# order_by=[TIMESTAMP_COL, "waterfall_position"],
# expected_values=values,
# )
def test_load_all_columns_aggregation(self):
columns = [
TIMESTAMP_COL,
"game",
"country",
"ad_network",
"ad_type",
"placement_id",
"waterfall_position",
"opportunities",
"count_cache_request",
"count_cache_success",
"count_view_start",
"count_view_click",
"requests",
"impressions",
"clicks",
"revenue",
]
dimensions = columns[:7]
values = [
[
"2019-09-01",
"1",
"us",
"source1",
"video",
"z",
2,
1000.0,
500,
310,
300,
40,
500,
300,
20,
40.0,
],
[
"2019-09-01",
"1",
"us",
"source2",
"media",
"b",
1,
1000.0,
200,
41,
30,
3,
200,
200,
40,
30.0,
],
[
"2019-09-01",
"1",
"us",
"source2",
"video",
"a",
0,
1000.0,
150,
104,
100,
20,
150,
100,
10,
20.0,
],
[
"2019-09-02",
"1",
"us",
"source2",
"video",
"a",
0,
300.0,
300,
200,
34,
1,
300,
30,
1,
6.0,
],
]
self._test_load(
columns=columns,
dimensions=dimensions,
dimensions_values=None,
start_date=None,
end_date=None,
order_by=dimensions,
expected_values=values,
)
def test_load_from_start_date(self):
columns = [TIMESTAMP_COL, "opportunities", "impressions", "revenue"]
values = [
["2019-09-02", 300, 30, 6],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date="2019-09-02",
end_date=None,
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_load_until_end_date(self):
columns = [TIMESTAMP_COL, "opportunities", "impressions", "revenue"]
values = [
["2019-09-01", 1000, 100, 20],
["2019-09-01", 1000, 200, 30],
["2019-09-01", 1000, 300, 40],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date=None,
end_date="2019-09-01",
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_load_date_range(self):
columns = [TIMESTAMP_COL, "opportunities", "impressions", "revenue"]
values = [
["2019-09-01", 1000, 100, 20],
["2019-09-01", 1000, 200, 30],
["2019-09-01", 1000, 300, 40],
]
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date="2019-09-01",
end_date="2019-09-01",
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_load_empty(self):
columns = [TIMESTAMP_COL, "opportunities", "impressions", "revenue"]
values = []
self._test_load(
columns=columns,
dimensions=None,
dimensions_values=None,
start_date="2019-09-03",
end_date="2019-09-04",
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_load_dimensions_basic_columns(self):
columns = [TIMESTAMP_COL, "ad_network", "opportunities", "impressions"]
values = [
["2019-09-01", "source1", 1000, 300],
["2019-09-01", "source2", 1000, 300],
["2019-09-02", "source2", 300, 30],
]
self._test_load(
columns=columns,
dimensions=[TIMESTAMP_COL, "ad_network"],
dimensions_values=None,
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "ad_network"],
expected_values=values,
)
def test_load_dimensions_values(self):
columns = [TIMESTAMP_COL, "ad_network", "opportunities", "impressions"]
values = [
["2019-09-01", "source2", 1000, 100],
["2019-09-01", "source2", 1000, 200],
["2019-09-02", "source2", 300, 30],
]
self._test_load(
columns=columns,
dimensions=[TIMESTAMP_COL, "ad_network", "waterfall_position"],
dimensions_values=[None, "source2", None],
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "impressions"],
expected_values=values,
)
def test_load_multiple_dimensions_values(self):
columns = [TIMESTAMP_COL, "ad_network", "opportunities", "impressions"]
values = [
["2019-09-01", "source1", 1000, 300],
["2019-09-01", "source2", 1000, 100],
["2019-09-01", "source2", 1000, 200],
["2019-09-02", "source2", 300, 30],
]
self._test_load(
columns=columns,
dimensions=[TIMESTAMP_COL, "ad_network", "waterfall_position"],
dimensions_values=[None, ["source1", "source2"], None],
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "ad_network", "impressions"],
expected_values=values,
)
def test_load_multiple_dimensions_values_with_negation(self):
columns = [TIMESTAMP_COL, "ad_network", "opportunities", "impressions"]
values = [
["2019-09-01", "source1", 1000, 300],
]
self._test_load(
columns=columns,
dimensions=[TIMESTAMP_COL, "ad_network", "!ad_type", "!placement_id"],
dimensions_values=[None, None, "media", ["a", "x"]],
start_date=None,
end_date=None,
order_by=[TIMESTAMP_COL, "ad_network", "impressions"],
expected_values=values,
)
def test_dimension_values(self):
ret = self.time_series_extractor.dimensions_values(
dimensions=["ad_network"],
dimensions_values=None,
start_date=None,
end_date=None,
order_by=["ad_network"],
)
self.assertEqual(ret, [["source1"], ["source2"]])
def test_dimension_values_filtered(self):
ret = self.time_series_extractor.dimensions_values(
dimensions=["ad_network", "waterfall_position"],
dimensions_values=["source2", None],
start_date=None,
end_date=None,
order_by=["ad_network", "waterfall_position"],
)
expected = [
["source2", 0],
["source2", 1],
]
self.assertEqual(ret, expected)
def test_dimension_values_multiple_dimensions(self):
ret = self.time_series_extractor.dimensions_values(
dimensions=["ad_network", "ad_type"],
dimensions_values=["*", ["video"]],
start_date=None,
end_date=None,
order_by=["ad_network"],
)
expected = [
["source1", "video"],
["source2", "video"],
]
self.assertEqual(ret, expected)
def test_builded_query_prequery(self):
columns = [
"timestamp",
"game",
"country",
"ad_network",
"ad_type",
"placement_id",
]
prequery = "SET extra_float_digits = 3;"
order_by = ["ad_type"]
start_date = "2019-09-01"
end_date = "2019-09-02"
query = self.time_series_extractor.build_query(
columns=columns,
prequery=prequery,
start_date=start_date,
end_date=end_date,
order_by=order_by,
)
# remove empty spaces and new lines
returned_query = " ".join(query[0].split())
return_query = "SET extra_float_digits = 3; SELECT timestamp, game, country, ad_network, ad_type, placement_id FROM test_data WHERE timestamp >= '2019-09-01' AND timestamp <= '2019-09-02' ORDER BY ad_type"
self.assertEqual(returned_query, return_query)
def test_builded_query_extra_cond(self):
columns = [
"timestamp",
"game",
"country",
"ad_network",
"ad_type",
"placement_id",
]
order_by = ["ad_type"]
start_date = "2019-09-01"
end_date = "2019-09-02"
extra_where_conditions = ["game LIKE '%mario%'"]
query = self.time_series_extractor.build_query(
columns=columns,
start_date=start_date,
end_date=end_date,
order_by=order_by,
extra_where_conditions=extra_where_conditions,
)
# remove empty spaces and new lines
returned_query = " ".join(query[0].split())
return_query = "SELECT timestamp, game, country, ad_network, ad_type, placement_id FROM test_data WHERE timestamp >= '2019-09-01' AND timestamp <= '2019-09-02' AND game LIKE '%mario%' ORDER BY ad_type"
self.assertEqual(returned_query, return_query)
@classmethod
def setUpClass(cls):
super().setUp(cls)
cls.time_series_extractor = TimeSeriesExtractor(
cls.db_client, ConcreteTimeSeriesTable.__tablename__
)
engine = cls.db_client.get_engine()
ConcreteTimeSeriesTable.__table__.create(engine) # pylint:disable=no-member
ConcreteAdNetworkJoinTimeSeriesTable.__table__.create( # pylint:disable=no-member
engine
)
ConcretePlacementIdJoinTimeSeriesTable.__table__.create( # pylint:disable=no-member
engine
)
query = """
INSERT INTO test_data
(timestamp,
game,
country,
ad_network,
ad_type,
placement_id,
waterfall_position,
total_count_view_opportunity,
count_cache_request,
count_cache_success,
count_view_start,
count_view_click,
requests,
impressions,
clicks,
revenue
)
VALUES
('2019-09-01',
'1',
'us',
'source1',
'video',
'z',
2,
1000,
500,
310,
300,
40,
500,
300,
20,
40
),
('2019-09-01',
'1',
'us',
'source2',
'video',
'a',
0,
1000,
150,
104,
100,
20,
150,
100,
10,
20
),
('2019-09-01',
'1',
'us',
'source2',
'media',
'b',
1,
1000,
200,
41,
30,
3,
200,
200,
40,
30
),
('2019-09-02',
'1',
'us',
'source2',
'video',
'a',
0,
300,
300,
200,
34,
1,
300,
30,
1,
6
)
"""
cls.run_query(query)
query = """
INSERT INTO test_ad_network_join_data
(ad_network,
ad_network_group
)
VALUES
('source2',
'source_group_A'
),
('source1',
'source_group_B'
),
('source3',
'source_group_B'
)
"""
cls.run_query(query)
query = """
INSERT INTO test_placement_id_join_data
(placement_id,
placement_id_group
)
VALUES
('z',
'placement_group_1'
),
('a',
'placement_group_1'
),
('b',
'placement_group_2'
)
"""
cls.run_query(query)
@classmethod
def tearDownClass(cls):
# del cls.dataset
super().tearDown(cls)
def setUp(self):
pass
def tearDown(self):
pass
if __name__ == "__main__":
main()
| 31.152009 | 213 | 0.504827 |
af6da0056de4d9007832d2d961f0f4a6770ac8f2 | 1,668 | py | Python | examples/sync/fanout/server.py | westover/tchannel-python | d9c16291f49b3b9dd1353c01179d4f4c3168c53a | ["MIT"] | null | null | null | examples/sync/fanout/server.py | westover/tchannel-python | d9c16291f49b3b9dd1353c01179d4f4c3168c53a | ["MIT"] | null | null | null | examples/sync/fanout/server.py | westover/tchannel-python | d9c16291f49b3b9dd1353c01179d4f4c3168c53a | ["MIT"] | null | null | null |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from tornado import gen, ioloop
from tchannel import TChannel, Response, thrift
tchannel = TChannel('thrift-server', hostport='localhost:54498')
service = thrift.load('tests/data/idls/ThriftTest.thrift')
@tchannel.thrift.register(service.ThriftTest)
@gen.coroutine
def testString(request):
assert request.headers == {'req': 'header'}
assert request.body.thing == 'req'
return Response('resp' * 100000, headers={'resp': 'header'})
tchannel.listen()
print(tchannel.hostport)
ioloop.IOLoop.current().start()
| 37.066667 | 79 | 0.767986 |
c8ea28859b109e06bfa39bae196bc98d39584db0 | 22,137 | py | Python | cloudcafe/networking/networks/common/proxy_mgr/ssh_util.py | melissa-kam/cloudcafe | 124c0923f7e7628267038cafe64339030bdaefda | ["Apache-2.0"] | null | null | null | cloudcafe/networking/networks/common/proxy_mgr/ssh_util.py | melissa-kam/cloudcafe | 124c0923f7e7628267038cafe64339030bdaefda | ["Apache-2.0"] | null | null | null | cloudcafe/networking/networks/common/proxy_mgr/ssh_util.py | melissa-kam/cloudcafe | 124c0923f7e7628267038cafe64339030bdaefda | ["Apache-2.0"] | null | null | null |
from collections import OrderedDict
import pexpect
class SshUnableToConnect(Exception):
MSG = 'Unable to connect to: {ip} (Type: {t_type})'
def __init__(self, target_ip=None, target_type=None):
target_type = target_type or 'Not Specified'
args = {'ip': target_ip, 't_type': target_type}
self.message = self.MSG.format(**args)
def __str__(self):
return self.message
class MissingCredentials(SshUnableToConnect):
MSG = 'Missing credentials to connect to: {ip} (Type: {t_type})'
class ConnectionNotFound(Exception):
def __str__(self):
return ('No open pexpect connection was provided. Was not type '
'pexpect.spawn')
class SshResponse(object):
str_concat = '{0!s}\n{1!s}'
def __init__(self):
"""
Supports pexpect connection info:
+ Contains open connection (if open)
+ Tracks various aspects of open connection: STDOUT, STDIN, STDERR
+ Tracks I/O per command (if commands issued over SSH connection)
+ errors = Any caught exceptions
+ stdin = all stdin
+ stdout = all stdout
+ output = interlaced stdin/stdout
"""
self.stdout = ''
self.stdin = ''
self.stderr = ''
self.output = ''
self.cmd_output = OrderedDict()
self.errors = ''
self.connection = None
def _add(self, attribute, value):
prop = getattr(self, attribute, self.stdout)
setattr(self, attribute, self.str_concat.format(prop, value))
self.output = self.str_concat.format(self.output, value)
def add_to_stdin(self, value):
self._add('stdin', value)
def add_to_stdout(self, value):
self._add('stdout', value)
def add_to_stderr(self, value):
self._add('stderr', value)
def add_to_cmd_out(self, cmd, value):
self._add('stdout', value)
self.cmd_output[cmd] = value
@property
def result(self):
return self.errors == ''
class SshMixin(object):
PSWD_PROMPT_REGEX = r'ssword\:\s*'
LINUX_PROMPT_REGEX = r'[$#>]\s*$'
SSH_KEY_REGEX = r'connecting\s+\(yes\/no\)\?\s*'
REFUSED = r'connection\s+refused'
DEFAULT_CMD = 'ls -alF'
def connect_to_proxy(
self, user=None, ip_address=None, password=None):
"""
Connect to proxy or local host
Note: Parameters are only for the remote connections
@param user: Use different user other than registered proxy user
@param password: Use different password other than registered pswd
@param ip_address: Use specific IP address other than proxy address
@return: Active SSH connection to target
"""
user = user or self.proxy_server_user
ip_address = ip_address or self.proxy_server_address
password = password or self.proxy_server_password
# Establish connection to proxy and return open connection
if self.use_proxy:
response_obj = self._ssh_from_here(
user=user, password=password, target_ip=ip_address)
conn = response_obj.connection
self.last_response = response_obj
# Return nothing, since it is a local connection.... for now, user can
# open pipe/process for local connection.
else:
conn = self._connect_to_local_proxy()
return conn
def can_ssh(self, target_ip, user, password, cmd=DEFAULT_CMD):
"""
Verifies SSH connectivity to specific target. The routine will connect
to the target IP and issue a single command. If the command returns
anything, including an error, SSH login was successful.
@param target_ip: SSH to IP
@param user: Log in as user 'x'
@param password: Log in using password
@param cmd: Command to execute if login worked
@return: (Boolean) : Did SSH work? True=YES, False=No
"""
output = self.ssh_to_target(
target_ip=target_ip, user=user, password=password, cmds=[cmd])
self.last_response = output
return cmd in output.output and output.errors == ''
def ssh_to_target(
self, target_ip=None, user=None, password=None, cmds=None,
proxy_user=None, proxy_pswd=None, proxy_ip=None,
close_when_done=True, response_obj=None, cmd_timeout=None):
"""
SSH to the target host from the specified host. If target_ip is not
specified, the response_obj with an open connection must be provided.
The open connection will be used to issue the commands. If neither the
(target_ip, user, and password) or the response_obj is specified,
an UnableToConnect exception will be raised.
NOTE: Currently as implemented, only Linux hosts are supported by this
routine.
NOTE: These parameters are only optional if the response_obj is not
provided.
:param target_ip: IP Address to connect to
:param user: username for SSH connection
:param password: password for SSH connection
:param proxy_user: Specify different user than what was configured in
the proxy_mgr.
:param proxy_pswd: Specify different password than what was
configured in the proxy_mgr.
:param proxy_ip: Specific different target IP than what was
configured in the proxy mgr.
:param close_when_done: Close the connection when complete.
:param cmds: OrderDict of cmds to execute to verify connection
(DEFAULT = 'ls -alF'). The Key is the command, the value is the
regexp needed to validate the cmd response. If no commands are
executed, the open connection is returned within the response
object.
:param response_obj: Provided SshResponse Object a for open
connection to pass cmds to...
:param cmd_timeout: Timeout per command. Default = 30s (as per pexpect)
:return: SSH Response object
"""
if response_obj is not None:
self.display_conn_info(response_obj)
if response_obj is None:
# Make sure there is an IP to connect to...
if target_ip is None:
raise SshUnableToConnect(
target_ip=target_ip, target_type='target server')
password = password or self.proxy_server_password
if None in [user, password]:
raise MissingCredentials(
target_ip=target_ip, target_type='target server')
# Ok, we have enough info to log in...
msg = 'No connection was provided. Establishing connection.'
self.logger.info(msg)
ssh_args = {'target_ip': target_ip, 'user': user,
'password': password}
# If we need a proxy
if self.use_proxy:
# Get the proxy connection info...
proxy_user = proxy_user or self.proxy_server_user
proxy_pswd = proxy_pswd or self.proxy_server_password
proxy_ip = proxy_ip or self.proxy_server_address
if None in [proxy_user, proxy_pswd, proxy_ip]:
raise MissingCredentials(
target_ip=proxy_ip, target_type='proxy server')
# Establish and save the connection to proxy server
proxy_connection = self._ssh_from_here(
target_ip=proxy_ip, user=proxy_user, password=proxy_pswd)
ssh_args['response_obj'] = proxy_connection
self.logger.debug('Connection Hop Path: {0}'.format(
self._conn_path))
# Make sure we connected...
if proxy_connection.connection is None:
self.logger.error(
'Unable to connect to proxy: {ip}'.format(ip=proxy_ip))
self.logger.error(proxy_connection.errors)
raise SshUnableToConnect(
target_ip=proxy_ip, target_type='proxy')
# Make SSH connection and verify connection was successful.
response_obj = self._ssh_from_here(**ssh_args)
if response_obj.connection is None:
self.logger.error(
'Unable to connect to host: {ip}'.format(ip=proxy_ip))
self.logger.error(response_obj.errors)
self.logger.debug('Connection Hop Path: {0}'.format(
self._conn_path))
raise SshUnableToConnect(
target_ip=target_ip, target_type='target server')
# If there are commands to execute
if cmds is not None:
args = {'response_obj': response_obj, 'cmds': cmds}
if cmd_timeout is not None:
args['timeout'] = cmd_timeout
response_obj = self._cmds_via_open_connection(**args)
self.last_response = response_obj
# Close the connection if necessary.
if close_when_done:
self.close_connections(response_obj)
return response_obj
def _ssh_from_here(self, target_ip, user, password, response_obj=None):
"""
Connect via ssh using a pexpect process from the local host or an
open pexpect connection to remote host.
@param target_ip: The IP address of the target host
@param user: Username on target host to connect to host as...
@param password: Password on target host to connect to host as...
@param response_obj: Optional SshResponse object holding an already open
connection to hop from; a new one is created if not provided.
@return: SshResponse object describing the connection attempt.
"""
response_obj = response_obj or SshResponse()
self.session_password = password
ssh_options = ('-oStrictHostKeyChecking=no '
'-oUserKnownHostsFile=/dev/null')
# Build SSH command
ssh_cmd = 'ssh {options} {user}@{ip}'.format(
user=user, ip=target_ip, options=ssh_options)
self.logger.debug('SSH INVOCATION CMD: {cmd}'.format(cmd=ssh_cmd))
response_obj.add_to_stdin(ssh_cmd)
# Build list of potential and expected output
# NOTE: LINUX_REGEX_PROMPT must be the last entry in the ordered dict
expectations = OrderedDict([
(pexpect.TIMEOUT, None),
(self.PSWD_PROMPT_REGEX, password),
(self.SSH_KEY_REGEX, 'yes'),
(self.REFUSED, None),
(self.LINUX_PROMPT_REGEX, None)])
# Set ssh process from the open connection
ssh_process = response_obj.connection
# If the open connection was empty, establish it from the local host
if ssh_process is None:
ssh_process = pexpect.spawn(ssh_cmd)
if ssh_process is None:
self.logger.error(
'Unable to connect to host: {ip}'.format(ip=target_ip))
raise SshUnableToConnect(target_ip=target_ip)
# Record the IP to track hops
if target_ip not in self._conn_path:
self._conn_path.append(target_ip)
ssh_process.delaybeforesend = self._pexpect_cmd_delay
response_obj.connection = ssh_process
# Use the open connection to establish an SSH connection to the target
else:
ssh_process.sendline(ssh_cmd)
if target_ip not in self._conn_path:
self._conn_path.append(target_ip)
while True:
# Watch the connection for expected output.
try:
response = ssh_process.expect(list(expectations.keys()))
# TIMEOUT, break out of loop and indicate FAILURE
except pexpect.TIMEOUT:
err = "SSH'ing to target timed out. {0} --> {1}"
err_msg = err.format(
ssh_process.before, ssh_process.after)
self.logger.error(err_msg)
response_obj.errors += '{0}\n'.format(err_msg)
# Record IO and remove IP from the tracking list
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.after))
self._conn_path.pop()
if not self._conn_path:
response_obj.connection = None
break
# CONNECTION CLOSED, save output and break out of loop.
except pexpect.EOF:
self.logger.debug('Reached END OF FILE')
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.after))
self._conn_path.pop()
if not self._conn_path:
response_obj.connection = None
break
# If TIMEOUT returned by response, break out of loop and indicate
# FAILURE...
if response == 0:
err = "SSH'ing target timed out. {0} --> {1}"
err_msg = err.format(ssh_process.before, ssh_process.after)
self.logger.error(err_msg)
response_obj.errors += '{0}\n'.format(err_msg)
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.after))
self._conn_path.pop()
if not self._conn_path:
response_obj.connection = None
break
# Connection refused
if response == 3:
err = "Connection Refused. {0} --> {1}"
err_msg = err.format(ssh_process.before, ssh_process.after)
self.logger.error(err_msg)
response_obj.errors += '{0}\n'.format(err_msg)
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.after))
self._conn_path.pop()
if not self._conn_path:
response_obj.connection = None
break
# Connection established, the expected prompt was found
# (last element in the expectation ordered dict)
if response == len(expectations.keys()) - 1:
if 'connection refused' in ssh_process.before.lower():
response_obj.errors += 'Connection Refused: {0}\n'.format(
ssh_process.before)
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.match.group()))
break
# Received expected output, transmit corresponding input
next_transmit = expectations[list(expectations.keys())[response]]
if next_transmit is None:
self.logger.warn("Didn't drop out of loop, but nothing "
"additional to transmit.")
self.logger.debug('Option pexpect returned: {0} of {1}'.format(
response, len(expectations.keys()) - 1))
self.logger.debug('Transaction thus far:\n{0}'.format(
str(ssh_process.before) + str(ssh_process.match.group()) +
str(ssh_process.after)))
break
# Transmit the next command in the process based on matched
# expectation
self.logger.debug("TX'ing: '{0}'".format(next_transmit))
response_obj.add_to_stdout(str(ssh_process.before) +
ssh_process.match.group())
response_obj.add_to_stdin(next_transmit)
ssh_process.sendline(next_transmit)
# Broke from loop, return all received output...
self.last_response = response_obj
return response_obj
def execute_cmds_via_open_connection(
self, connection, cmds, response_obj=None, close_conn=False,
timeout=None):
"""
Execute the list of commands on the open connection
@param connection: Open pexpect connection
@param cmds: list of commands to execute
@param response_obj: The SSH Response object; instantiated if not provided
@param close_conn: (Boolean), Close the connection when done?
@param timeout: (int) Max number of seconds to wait per command
@return: Populated response object
"""
if not isinstance(connection, pexpect.spawn):
raise ConnectionNotFound()
if response_obj is None:
response_obj = SshResponse()
response_obj.connection = connection
args = {'response_obj': response_obj, 'cmds': cmds}
if timeout is not None:
args['timeout'] = timeout
response_obj = self._cmds_via_open_connection(**args)
if close_conn:
self.close_connections(response_obj)
return response_obj
def _cmds_via_open_connection(self, response_obj, cmds, timeout=30):
"""
SSH from the local host using pexpect.
@param response_obj: Populated SshResponse Obj
@param cmds: Ordered Dict of commands to execute on the host to
validate connection
@param timeout: Amount of time allowed per cmd (default: 30s, which
is the default for pexpect)
@return: SshResponse Obj
"""
# Build list of potential and expected output
expectations = OrderedDict([
(pexpect.TIMEOUT, None),
(self.PSWD_PROMPT_REGEX, self.session_password),
(self.LINUX_PROMPT_REGEX, None)])
# Get the SSH connection to the target host
ssh_process = response_obj.connection
for cmd in cmds:
self.logger.debug("TX'ing CMD: '{0}'".format(cmd))
ssh_process.sendline(cmd)
response_obj.add_to_stdin(cmd)
while True:
# Watch connection for potential and expected output.
try:
response = ssh_process.expect(
    list(expectations.keys()), timeout=timeout)
# TIMEOUT, break out of loop and indicate FAILURE
except pexpect.TIMEOUT:
err = "CMD '{cmd}' timed out. {before} --> {after}"
err_msg = err.format(
before=ssh_process.before, after=ssh_process.after,
cmd=cmd)
self.logger.error(err_msg)
response_obj.errors += '{0}\n'.format(err_msg)
self.logger.debug('Connection Hop Path: {0}'.format(
self._conn_path))
response_obj.add_to_stdout(str(ssh_process.before))
if not self._conn_path:
response_obj.connection = None
break
# CONNECTION CLOSED, save output and break out of loop.
except pexpect.EOF:
self.logger.debug('Reached END OF FILE')
response_obj.add_to_stdout(str(ssh_process.before))
if cmd == 'exit':
output = (str(ssh_process.before) +
str(ssh_process.after))
response_obj.add_to_cmd_out(cmd, output)
self._conn_path.pop()
if not self._conn_path:
response_obj.connection = None
break
# If TIMEOUT returned by response, break out of loop and
# indicate FAILURE...
if response == 0:
err = "CMD '{cmd}' timed out. {before} --> {after}"
err_msg = err.format(
before=ssh_process.before, after=ssh_process.after,
cmd=cmd)
self.logger.error(err_msg)
response_obj.errors += '{0}\n'.format(err_msg)
response_obj.add_to_stdout(
str(ssh_process.before) + str(ssh_process.after))
self.logger.debug('Connection Hop Path: {0}'.format(
self._conn_path))
break
if response == (len(expectations.keys()) - 1):
self.logger.debug('CMD {cmd} issued.'.format(cmd=cmd))
output = (str(ssh_process.before) +
ssh_process.match.group() +
str(ssh_process.after))
response_obj.add_to_cmd_out(cmd, output)
self.last_response = response_obj
break
# Transmit the next command/input based on matched expectation
next_transmit = expectations[list(expectations.keys())[response]]
self.logger.debug("TX'ing: '{0}'".format(next_transmit))
response_obj.add_to_stdout(
str(ssh_process.before) + ssh_process.match.group())
response_obj.add_to_stdin(next_transmit)
self.last_response = response_obj
# Broke from loop, return all received output...
self.last_response = response_obj
return response_obj
def close_connections(self, response_obj):
"""
Close all open connections, based on IP/hop tracking
@param response_obj: Populated SshResponse Object
@return: None
"""
self.logger.debug('Closing all open connections: {0}'.format(
self._conn_path))
# If there are connections open...
if getattr(response_obj, 'connection', None) is not None:
# Iterate through the hop list (if the connection is still open)
while (list(set(self._conn_path)) and
response_obj.connection is not None):
response_obj = self._cmds_via_open_connection(
response_obj, ['exit'])
self.last_response = response_obj
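# Usage sketch (hypothetical harness, not part of cloudcafe): SshMixin expects
# its host class to supply a logger, proxy settings and a hop-tracking list.
# The attribute names below mirror what the mixin reads; the IP address, user
# and password are placeholders.
import logging


class _DemoProxyMgr(SshMixin):
    def __init__(self):
        self.logger = logging.getLogger('ssh_util_demo')
        self.use_proxy = False  # connect directly, without a proxy hop
        self.proxy_server_user = None
        self.proxy_server_password = None
        self.proxy_server_address = None
        self._conn_path = []  # tracks the chain of SSH hops
        self._pexpect_cmd_delay = 0.2  # delay pexpect applies before sending
        self.session_password = None
        self.last_response = None


if __name__ == '__main__':
    mgr = _DemoProxyMgr()
    # Attempts an SSH hop to the placeholder address, runs one command, and
    # returns an SshResponse aggregating stdin/stdout and any errors.
    result = mgr.ssh_to_target(target_ip='192.0.2.10', user='demo',
                               password='demo', cmds=['uname -a'])
    print(result.result)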
| 39.672043 | 79 | 0.582283 |
9a6a046c39abe53763000dd70f454b9d78f40e59 | 10,055 | py | Python | glue/viewers/table/qt/viewer_widget.py | Jerry-Ma/glue | 96b9bd3d8c8c9ed3f48f0e91a8829c5f24a04a46 | ["BSD-3-Clause"] | 3 | 2015-09-10T22:23:55.000Z | 2019-04-04T18:47:33.000Z | glue/viewers/table/qt/viewer_widget.py | astrofrog/glue | 4aa8c64a6f65629207e40df9963232473a24c9f6 | ["BSD-3-Clause"] | null | null | null | glue/viewers/table/qt/viewer_widget.py | astrofrog/glue | 4aa8c64a6f65629207e40df9963232473a24c9f6 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from qtpy import PYQT5
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core.layer_artist import LayerArtistBase
from glue.core import message as msg
from glue.core import Data
from glue.utils import nonpartial
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.qt.tool import CheckableTool
from glue.core.subset import ElementSubsetState
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.state import lookup_class_with_patches
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt4_color
from glue.core.exceptions import IncompatibleAttribute
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
def __init__(self, table_viewer):
super(DataTableModel, self).__init__()
if table_viewer.data.ndim != 1:
raise ValueError("Can only use Table widget for 1D data")
self._table_viewer = table_viewer
self._data = table_viewer.data
self.show_hidden = False
self.order = np.arange(self._data.shape[0])
def data_changed(self):
top_left = self.index(0, 0)
bottom_right = self.index(self.columnCount(), self.rowCount())
self.dataChanged.emit(top_left, bottom_right)
@property
def columns(self):
if self.show_hidden:
return self._data.components
else:
return self._data.visible_components
def columnCount(self, index=None):
return len(self.columns)
def rowCount(self, index=None):
# Qt bug: Crashes on tables bigger than this
return min(self._data.size, 71582788)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
column_name = self.columns[section].label
units = self._data.get_component(self.columns[section]).units
if units != '':
column_name += "\n{0}".format(units)
return column_name
elif orientation == Qt.Vertical:
return str(self.order[section])
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
c = self.columns[index.column()]
idx = self.order[index.row()]
comp = self._data.get_component(c)
if comp.categorical:
comp = comp.labels
else:
comp = comp.data
if isinstance(comp[idx], bytes):
return comp[idx].decode('ascii')
else:
return str(comp[idx])
elif role == Qt.BackgroundRole:
idx = self.order[index.row()]
# Find all subsets that this index is part of
colors = []
for layer_artist in self._table_viewer.layers[::-1]:
if layer_artist.visible:
subset = layer_artist.layer
try:
if subset.to_mask(view=slice(idx, idx + 1))[0]:
colors.append(subset.style.color)
except IncompatibleAttribute as exc:
layer_artist.disable_invalid_attributes(*exc.args)
else:
layer_artist.enabled = True
# Blend the colors using alpha blending
if len(colors) > 0:
color = alpha_blend_colors(colors, additional_alpha=0.5)
color = mpl_to_qt4_color(color)
return QtGui.QBrush(color)
def sort(self, column, ascending):
c = self.columns[column]
comp = self._data.get_component(c)
if comp.categorical:
self.order = np.argsort(comp.labels)
else:
self.order = np.argsort(comp.data)
if ascending == Qt.DescendingOrder:
self.order = self.order[::-1]
self.layoutChanged.emit()
class TableLayerArtist(LayerArtistBase):
def __init__(self, layer, table_viewer):
self._table_viewer = table_viewer
super(TableLayerArtist, self).__init__(layer)
def redraw(self):
self._table_viewer.model.data_changed()
def update(self):
pass
def clear(self):
pass
@viewer_tool
class RowSelectTool(CheckableTool):
tool_id = 'table:rowselect'
icon = 'glue_row_select'
action_text = 'Select row(s)'
tool_tip = ('Select rows by clicking on rows and pressing enter '
'once the selection is ready to be applied')
status_tip = ('CLICK to select, press ENTER to finalize selection, '
'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')
def __init__(self, viewer):
super(RowSelectTool, self).__init__(viewer)
self.deactivate()
def activate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def deactivate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
selection_changed = QtCore.Signal()
def selectionChanged(self, *args, **kwargs):
self.selection_changed.emit()
super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableWidget(DataViewer):
LABEL = "Table Viewer"
_toolbar_cls = BasicToolbar
tools = ['table:rowselect']
def __init__(self, session, parent=None, widget=None):
super(TableWidget, self).__init__(session, parent)
self.ui = load_ui('viewer_widget.ui',
directory=os.path.dirname(__file__))
self.setCentralWidget(self.ui)
hdr = self.ui.table.horizontalHeader()
hdr.setStretchLastSection(True)
hdr.setSectionResizeMode(hdr.Interactive)
hdr = self.ui.table.verticalHeader()
hdr.setSectionResizeMode(hdr.Interactive)
self.model = None
self.ui.table.selection_changed.connect(self.selection_changed)
def selection_changed(self):
app = get_qapp()
if app.queryKeyboardModifiers() == Qt.AltModifier:
self.finalize_selection(clear=False)
def keyPressEvent(self, event):
if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
self.finalize_selection()
super(TableWidget, self).keyPressEvent(event)
def finalize_selection(self, clear=True):
model = self.ui.table.selectionModel()
selected_rows = [self.model.order[x.row()] for x in model.selectedRows()]
subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
mode = EditSubsetMode()
mode.update(self._data, subset_state, focus_data=self.data)
if clear:
# We block the signals here to make sure that we don't update
# the subset again once the selection is cleared.
self.ui.table.blockSignals(True)
self.ui.table.clearSelection()
self.ui.table.blockSignals(False)
def register_to_hub(self, hub):
super(TableWidget, self).register_to_hub(hub)
def dfilter(x):
return x.sender.data is self.data
hub.subscribe(self, msg.SubsetCreateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.SubsetUpdateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.SubsetDeleteMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.DataUpdateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
def _refresh(self):
self._sync_layers()
self.model.data_changed()
def _sync_layers(self):
# For now we don't show the data in the list because it always has to
# be shown
for layer_artist in self.layers:
if layer_artist.layer not in self.data.subsets:
self._layer_artist_container.remove(layer_artist)
for subset in self.data.subsets:
if subset not in self._layer_artist_container:
self._layer_artist_container.append(TableLayerArtist(subset, self))
def add_data(self, data):
self.data = data
self.setUpdatesEnabled(False)
self.model = DataTableModel(self)
self.ui.table.setModel(self.model)
self.setUpdatesEnabled(True)
self._sync_layers()
return True
def add_subset(self, subset):
return True
def unregister(self, hub):
pass
def closeEvent(self, event):
"""
On close, Qt seems to scan through the entire model
if the data set is big. To sidestep that,
we swap out with a tiny data set before closing
"""
d = Data(x=[0])
self.ui.table.setModel(DataTableModel(d))
event.accept()
def restore_layers(self, rec, context):
# For now this is a bit of a hack: we assume that all subsets saved
# for this viewer come from the same dataset, so we just get the Data
# object from one layer and then sync the layers.
for layer in rec:
c = lookup_class_with_patches(layer.pop('_type'))
props = dict((k, context.object(v)) for k, v in layer.items())
layer = props['layer']
self.add_data(layer.data)
break
self._sync_layers()
| 33.294702
| 92
| 0.632223
|
bacbd035dd458fb976315f01756fc291d622c7ac
| 2,807
|
py
|
Python
|
app/core/test/test_models.py
|
veerpratik/receipe-app-api
|
a4eda089d71c2d2e953ed5bc9dbb356ee36f4098
|
[
"MIT"
] | null | null | null |
app/core/test/test_models.py
|
veerpratik/receipe-app-api
|
a4eda089d71c2d2e953ed5bc9dbb356ee36f4098
|
[
"MIT"
] | null | null | null |
app/core/test/test_models.py
|
veerpratik/receipe-app-api
|
a4eda089d71c2d2e953ed5bc9dbb356ee36f4098
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@demo.com', password='testpass'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTest(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a user with an email and password is successful"""
email = "pratikveer@demo.com"
password = "test@1234"
user = get_user_model().objects.create_user(
email=email,
password=password,
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalize(self):
"""Test that the email for a new user is normalized"""
email = "pratikveer@DEMO.COM"
user = get_user_model().objects.create_user(email, "abcd")
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
""" Test craeting user with no email raises error"""
with self.assertRaises(
ValueError
): # hyachya khali je sentense astil te valuerror detata check krtoo
get_user_model().objects.create_user(None, "test")
# if raise error by above senetence then it test pass
def test_creating_new_superuser(self):
""" Test creating new superuser """
user = get_user_model().objects.create_superuser("admin@demo.com",
"admin@1234")
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""Test the ingredient string representation"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 33.023529
| 77
| 0.631635
|
bb7eace41dfe9e1d4840dc454209efa9cfbdaaef
| 21,174
|
py
|
Python
|
ce_bbx.py
|
krzysztoffiok/c_elegans_fitness
|
d16270f882890aa42df55739d9bb0efc2e2168e6
|
[
"MIT"
] | 1
|
2022-01-31T15:49:01.000Z
|
2022-01-31T15:49:01.000Z
|
ce_bbx.py
|
krzysztoffiok/c_elegans_fitness
|
d16270f882890aa42df55739d9bb0efc2e2168e6
|
[
"MIT"
] | null | null | null |
ce_bbx.py
|
krzysztoffiok/c_elegans_fitness
|
d16270f882890aa42df55739d9bb0efc2e2168e6
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Modified for CElegans fitness assessment by Krzysztof Fiok
------------------------------------------------------------
Example use:
To analyze images:
python3 ce_bbx.py inference --image_folder=/path/to/images/for/inference --DMC=0.9 --NMS=0.6 --model=/path/to/model_weights
To compute modified MS COCO metrics:
python3 ce_bbx.py evaluate --dataset=/path/to/evaluation_dataset --DMC=0.9 --NMS=0.6 --model=/path/to/model_weights
To draw Precision Recall Curve (PRC):
python3 ce_bbx.py evaluate_PRC --dataset=/path/to/evaluation_dataset --model=/path/to/model_weights
To train your own model:
python3 ce_bbx.py train --dataset=/path/to/train_dataset --model=path_to_initial_model_weights
"""
import os
import sys
import time
import numpy as np
import imgaug
import skimage.draw
from mrcnn import visualize
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "", "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = ""
############################################################
# Configurations
############################################################
class CElegansConfig(Config):
"""Configuration for training on .
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "CE"
# Depends on GPU RAM
IMAGES_PER_GPU = 1
# (default is 1)
GPU_COUNT = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 2 # CE has 2 classes
RPN_NMS_THRESHOLD = 0.9
DETECTION_MIN_CONFIDENCE = 0.7
############################################################
# Dataset
############################################################
class CElegansDataset(utils.Dataset):
def load_celegans(self, dataset_dir, subset, class_ids=None, return_coco=False):
"""Load a subset of the CE dataset.
dataset_dir: The root directory of the CE dataset.
subset: What to load (train, val)
class_ids: If provided, only loads images that have the given classes.
return_coco: If True, returns the COCO object.
"""
coco = COCO("{}/annotations/{}.json".format(dataset_dir, subset))
image_dir = "{}/{}".format(dataset_dir, subset)
class_ids = sorted(coco.getCatIds())
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CElegansDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CElegansDataset, self).load_mask(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
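# Hedged illustration (not part of the original pipeline): the two helpers above
# turn a COCO-style annotation into a binary mask. With a hypothetical polygon
# annotation, the call would look roughly like:
#
#   dataset = CElegansDataset()
#   ann = {'segmentation': [[10, 10, 50, 10, 50, 50, 10, 50]], 'iscrowd': 0}
#   mask = dataset.annToMask(ann, height=100, width=100)  # numpy 2D array of 0/1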
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def evaluate_coco_PRC(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
r["class_ids"] = [x-1 for x in r["class_ids"]]
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.params.iouThrs = [0.1]
cocoEval.params.areaRng = [[0, 10000000000.0]]
cocoEval.params.maxDets = [100]
cocoEval.evaluate()
cocoEval.accumulate()
precision = cocoEval.eval['precision'][0, :, 0, 0, 0]
recall = cocoEval.params.recThrs
plt.plot(recall, precision, 'ro')
plt.xlabel('recall')
plt.ylabel('precision')
plt.title('PRC IoU 0,5')
plt.savefig(fname='PRC' + str(limit) + '.jpg')
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
# Pick COCO images from the dataset
# image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# r["class_ids"] = [x-1 for x in r["class_ids"]]
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
print('Original COCO metrics')
sumcoco = cocoEval.summarize_coco()
sumcoco = pd.DataFrame(sumcoco)
print('Original PASCAL VOC metrics')
sumvoc = cocoEval.summarize_voc()
sumvoc = pd.DataFrame(sumvoc)
sumcoco.to_csv('output_coco_%s.csv' % args.model[-6:])
sumvoc.to_csv('output_voc_%s.csv' % args.model[-6:])
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=False,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--image_folder', required=False,
metavar="folder path",
help='Folder path with images for inference')
parser.add_argument('--results', required=False, default='', help="String added at end of results path")
parser.add_argument('--DMC', required=False, default=0.95, type=float, help="Provide Detection Max Confidence")
parser.add_argument('--NMS', required=False, default=0.5, type=float, help="Provide Non Maximum Suppression")
args = parser.parse_args()
dmc = args.DMC
nms = args.NMS
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = CElegansConfig()
else:
import ce_segmentation
config = ce_segmentation.CElegansInferenceConfig()
config.DETECTION_MIN_CONFIDENCE = float(dmc)
config.DETECTION_NMS_THRESHOLD = float(nms)
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()
else:
model_path = args.model
# Load weights
if args.model.lower() == "coco":
model.load_weights(model_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset
dataset_train = CElegansDataset()
dataset_train.load_celegans(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = CElegansDataset()
dataset_val.load_celegans(args.dataset, "val")
dataset_val.prepare()
# Image Augmentation
augmentation = imgaug.augmenters.CoarseDropout(p=0.1, size_percent=0.3)
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=50,
layers='heads',
augmentation=None)
print("Training 4+ network layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=100,
layers='4+',
augmentation=None)
print("Training all network layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 5,
epochs=150,
layers='all',
augmentation=None)
elif args.command == "inference":
class_names = ['BG', 'CE', 'CE_glow', 'dec', 'dead', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
column_names = ['CE', 'CE_glow', 'file_name']
bbx_info_list = pd.DataFrame(columns=['y1', 'x1', 'y2', 'x2', 'file_name'])
image_file_list = []
output_path = (os.path.join(ROOT_DIR, 'result_%s' % str(args.model), args.results))
try:
os.makedirs(output_path)
except FileExistsError:
print('----- The output folder already exists, overwriting ----')
CE_list = []
CE_glow_list = []
for root, dirs, files in os.walk(args.image_folder, topdown=False):
for name in files:
print(name)
input_file_path = os.path.join(root, name)
image = skimage.io.imread(input_file_path)
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
class_count, bbx_info = visualize.display_instances(output_path, name, image, r['rois'],
r['masks'], r['class_ids'],
class_names, r['scores'], show_mask=True)
image_file_list.append(str(name))
class_count = class_count.T
print(class_count)
CE_list.append((class_count.iloc[0, 0]))
CE_glow_list.append(str(class_count.iloc[0, 1]))
bbx_info_list = pd.concat([bbx_info_list, bbx_info], axis=0, sort=True)
class_count_list = {"file_name": image_file_list, "CE": CE_list, "CE_glow": CE_glow_list}
class_count_list = pd.DataFrame(class_count_list)
class_count_list.to_csv(os.path.join(output_path, 'class_count.csv'))
bbx_info_list = bbx_info_list.sort_values(by=['file_name'])
bbx_info_list.to_csv(os.path.join(output_path, 'bbx_info.csv'))
print('The results were saved to: ', output_path)
elif args.command == "eval_table":
dataset_val = CElegansDataset()
coco = dataset_val.load_celegans(args.dataset, "val", return_coco=True)
dataset_val.prepare()
evaluate_coco_PRC(model, dataset_val, coco, "bbox", limit=int(args.limit))
elif args.command == "evaluate":
dataset_val = CElegansDataset()
coco = dataset_val.load_celegans(args.dataset, "val", return_coco=True)
dataset_val.prepare()
evaluate_coco_PRC(model, dataset_val, coco, "bbox", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train', 'inference', 'evaluate' or 'eval_table'".format(args.command))
| 38.568306
| 124
| 0.561349
|
70217a2ce52db7b03219d1fde795b4a7d5c8d948
| 119,676
|
py
|
Python
|
pyboto3/devicefarm.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/devicefarm.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/devicefarm.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
"""
pass
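# Hedged usage sketch (not part of this stub): on a real boto3 Device Farm client,
# can_paginate() tells you whether get_paginator() will work for an operation.
# The client construction and the 'list_projects' operation below are assumptions
# used only for illustration.
#
#   import boto3
#   client = boto3.client('devicefarm', region_name='us-west-2')
#   if client.can_paginate('list_projects'):
#       paginator = client.get_paginator('list_projects')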
def create_device_pool(projectArn=None, name=None, description=None, rules=None):
"""
Creates a device pool.
See also: AWS API Documentation
Examples
The following example creates a new device pool named MyDevicePool inside an existing project.
Expected Output:
:example: response = client.create_device_pool(
projectArn='string',
name='string',
description='string',
rules=[
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
)
:type projectArn: string
:param projectArn: [REQUIRED]
The ARN of the project for the device pool.
:type name: string
:param name: [REQUIRED]
The device pool's name.
:type description: string
:param description: The device pool's description.
:type rules: list
:param rules: [REQUIRED]
The device pool's rules.
(dict) --Represents a condition for a device pool.
attribute (string) --The rule's stringified attribute. For example, specify the value as '\'abc\'' .
Allowed values include:
ARN: The ARN.
FORM_FACTOR: The form factor (for example, phone or tablet).
MANUFACTURER: The manufacturer.
PLATFORM: The platform (for example, Android or iOS).
REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.
APPIUM_VERSION: The Appium version for the test.
operator (string) --The rule's operator.
EQUALS: The equals operator.
GREATER_THAN: The greater-than operator.
IN: The in operator.
LESS_THAN: The less-than operator.
NOT_IN: The not-in operator.
CONTAINS: The contains operator.
value (string) --The rule's value.
:rtype: dict
:return: {
'devicePool': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'rules': [
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
}
}
:returns:
CURATED: A device pool that is created and managed by AWS Device Farm.
PRIVATE: A device pool that is created and managed by the device pool developer.
"""
pass
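# Hedged usage sketch (illustrative ARNs and values, not from this stub): creating
# a private pool of Android devices with a real boto3 client. Rule values are
# passed as stringified strings, as described in the rules parameter above.
#
#   import boto3
#   client = boto3.client('devicefarm', region_name='us-west-2')
#   response = client.create_device_pool(
#       projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
#       name='android-devices',
#       description='Devices matching the ANDROID platform',
#       rules=[{'attribute': 'PLATFORM', 'operator': 'EQUALS', 'value': '"ANDROID"'}],
#   )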
def create_network_profile(projectArn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):
"""
Creates a network profile.
See also: AWS API Documentation
:example: response = client.create_network_profile(
projectArn='string',
name='string',
description='string',
type='CURATED'|'PRIVATE',
uplinkBandwidthBits=123,
downlinkBandwidthBits=123,
uplinkDelayMs=123,
downlinkDelayMs=123,
uplinkJitterMs=123,
downlinkJitterMs=123,
uplinkLossPercent=123,
downlinkLossPercent=123
)
:type projectArn: string
:param projectArn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to create a network profile.
:type name: string
:param name: [REQUIRED]
The name you wish to specify for the new network profile.
:type description: string
:param description: The description of the network profile.
:type type: string
:param type: The type of network profile you wish to create. Valid values are listed below.
:type uplinkBandwidthBits: integer
:param uplinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.
:type downlinkBandwidthBits: integer
:param downlinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.
:type uplinkDelayMs: integer
:param uplinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.
:type downlinkDelayMs: integer
:param downlinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.
:type uplinkJitterMs: integer
:param uplinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.
:type downlinkJitterMs: integer
:param downlinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.
:type uplinkLossPercent: integer
:param uplinkLossPercent: Proportion of transmitted packets that fail to arrive from 0 to 100 percent.
:type downlinkLossPercent: integer
:param downlinkLossPercent: Proportion of received packets that fail to arrive from 0 to 100 percent.
:rtype: dict
:return: {
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
"""
pass
def create_project(name=None, defaultJobTimeoutMinutes=None):
"""
Creates a new project.
See also: AWS API Documentation
Examples
The following example creates a new project named MyProject.
Expected Output:
:example: response = client.create_project(
name='string',
defaultJobTimeoutMinutes=123
)
:type name: string
:param name: [REQUIRED]
The project's name.
:type defaultJobTimeoutMinutes: integer
:param defaultJobTimeoutMinutes: Sets the execution timeout value (in minutes) for a project. All test runs in this project will use the specified execution timeout value unless overridden when scheduling a run.
:rtype: dict
:return: {
'project': {
'arn': 'string',
'name': 'string',
'defaultJobTimeoutMinutes': 123,
'created': datetime(2015, 1, 1)
}
}
"""
pass
def create_remote_access_session(projectArn=None, deviceArn=None, name=None, configuration=None):
"""
Specifies and starts a remote access session.
See also: AWS API Documentation
Examples
The following example creates a remote access session named MySession.
Expected Output:
:example: response = client.create_remote_access_session(
projectArn='string',
deviceArn='string',
name='string',
configuration={
'billingMethod': 'METERED'|'UNMETERED'
}
)
:type projectArn: string
:param projectArn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to create a remote access session.
:type deviceArn: string
:param deviceArn: [REQUIRED]
The Amazon Resource Name (ARN) of the device for which you want to create a remote access session.
:type name: string
:param name: The name of the remote access session that you wish to create.
:type configuration: dict
:param configuration: The configuration information for the remote access session request.
billingMethod (string) --Returns the billing method for purposes of configuring a remote access session.
:rtype: dict
:return: {
'remoteAccessSession': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'message': 'string',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'endpoint': 'string'
}
}
:returns:
PENDING: A pending status.
PENDING_CONCURRENCY: A pending concurrency status.
PENDING_DEVICE: A pending device status.
PROCESSING: A processing status.
SCHEDULING: A scheduling status.
PREPARING: A preparing status.
RUNNING: A running status.
COMPLETED: A completed status.
STOPPING: A stopping status.
"""
pass
def create_upload(projectArn=None, name=None, type=None, contentType=None):
"""
Uploads an app or test scripts.
See also: AWS API Documentation
Examples
The following example creates a new Appium Python test package upload inside an existing project.
Expected Output:
:example: response = client.create_upload(
projectArn='string',
name='string',
type='ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE',
contentType='string'
)
:type projectArn: string
:param projectArn: [REQUIRED]
The ARN of the project for the upload.
:type name: string
:param name: [REQUIRED]
The upload's file name. The name should not contain the '/' character. If uploading an iOS app, the file name needs to end with the .ipa extension. If uploading an Android app, the file name needs to end with the .apk extension. For all others, the file name must end with the .zip file extension.
:type type: string
:param type: [REQUIRED]
The upload's upload type.
Must be one of the following values:
ANDROID_APP: An Android upload.
IOS_APP: An iOS upload.
WEB_APP: A web application upload.
EXTERNAL_DATA: An external data upload.
APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
CALABASH_TEST_PACKAGE: A Calabash test package upload.
INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.
UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.
UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.
XCTEST_TEST_PACKAGE: An XCode test package upload.
XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.
Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.
:type contentType: string
:param contentType: The upload's content type (for example, 'application/octet-stream').
:rtype: dict
:return: {
'upload': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE',
'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',
'url': 'string',
'metadata': 'string',
'contentType': 'string',
'message': 'string'
}
}
:returns:
ANDROID_APP: An Android upload.
IOS_APP: An iOS upload.
WEB_APP: A web application upload.
EXTERNAL_DATA: An external data upload.
APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
CALABASH_TEST_PACKAGE: A Calabash test package upload.
INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.
UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.
UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.
XCTEST_TEST_PACKAGE: An XCode test package upload.
XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.
"""
pass
def delete_device_pool(arn=None):
"""
Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.
See also: AWS API Documentation
Examples
The following example deletes a specific device pool.
Expected Output:
:example: response = client.delete_device_pool(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.
:rtype: dict
:return: {}
"""
pass
def delete_network_profile(arn=None):
"""
Deletes a network profile.
See also: AWS API Documentation
:example: response = client.delete_network_profile(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the network profile you want to delete.
:rtype: dict
:return: {}
"""
pass
def delete_project(arn=None):
"""
Deletes an AWS Device Farm project, given the project ARN.
See also: AWS API Documentation
Examples
The following example deletes a specific project.
Expected Output:
:example: response = client.delete_project(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.
:rtype: dict
:return: {}
"""
pass
def delete_remote_access_session(arn=None):
"""
Deletes a completed remote access session and its results.
See also: AWS API Documentation
Examples
The following example deletes a specific remote access session.
Expected Output:
:example: response = client.delete_remote_access_session(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the session for which you want to delete remote access.
:rtype: dict
:return: {}
"""
pass
def delete_run(arn=None):
"""
Deletes the run, given the run ARN.
See also: AWS API Documentation
Examples
The following example deletes a specific test run.
Expected Output:
:example: response = client.delete_run(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) for the run you wish to delete.
:rtype: dict
:return: {}
"""
pass
def delete_upload(arn=None):
"""
Deletes an upload given the upload ARN.
See also: AWS API Documentation
Examples
The following example deletes a specific upload.
Expected Output:
:example: response = client.delete_upload(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.
:rtype: dict
:return: {}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_account_settings():
"""
Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.
See also: AWS API Documentation
Examples
The following example returns information about your Device Farm account settings.
Expected Output:
:example: response = client.get_account_settings()
:rtype: dict
:return: {
'accountSettings': {
'awsAccountNumber': 'string',
'unmeteredDevices': {
'string': 123
},
'unmeteredRemoteAccessDevices': {
'string': 123
},
'maxJobTimeoutMinutes': 123,
'trialMinutes': {
'total': 123.0,
'remaining': 123.0
},
'maxSlots': {
'string': 123
},
'defaultJobTimeoutMinutes': 123
}
}
:returns:
(string) --
(integer) --
"""
pass
def get_device(arn=None):
"""
Gets information about a unique device type.
See also: AWS API Documentation
Examples
The following example returns information about a specific device.
Expected Output:
:example: response = client.get_device(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The device type's ARN.
:rtype: dict
:return: {
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
}
}
:returns:
ANDROID: The Android platform.
IOS: The iOS platform.
"""
pass
def get_device_pool(arn=None):
"""
Gets information about a device pool.
See also: AWS API Documentation
Examples
The following example returns information about a specific device pool, given a project ARN.
Expected Output:
:example: response = client.get_device_pool(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The device pool's ARN.
:rtype: dict
:return: {
'devicePool': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'rules': [
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
}
}
:returns:
ARN: The ARN.
FORM_FACTOR: The form factor (for example, phone or tablet).
MANUFACTURER: The manufacturer.
PLATFORM: The platform (for example, Android or iOS).
REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.
APPIUM_VERSION: The Appium version for the test.
"""
pass
def get_device_pool_compatibility(devicePoolArn=None, appArn=None, testType=None, test=None):
"""
Gets information about compatibility with a device pool.
See also: AWS API Documentation
Examples
The following example returns information about the compatibility of a specific device pool, given its ARN.
Expected Output:
:example: response = client.get_device_pool_compatibility(
devicePoolArn='string',
appArn='string',
testType='BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
test={
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'testPackageArn': 'string',
'filter': 'string',
'parameters': {
'string': 'string'
}
}
)
:type devicePoolArn: string
:param devicePoolArn: [REQUIRED]
The device pool's ARN.
:type appArn: string
:param appArn: The ARN of the app that is associated with the specified device pool.
:type testType: string
:param testType: The test type for the specified device pool.
Allowed values include the following:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
:type test: dict
:param test: Information about the uploaded test to be run against the device pool.
type (string) -- [REQUIRED]The test's type.
Must be one of the following values:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
testPackageArn (string) --The ARN of the uploaded test that will be run.
filter (string) --The test's filter.
parameters (dict) --The test's parameters, such as the following test framework parameters and fixture settings:
For Calabash tests:
profile: A cucumber profile, for example, 'my_profile_name'.
tags: You can limit execution to features or scenarios that have (or don't have) certain tags, for example, '@smoke' or '@smoke,~@wip'.
For Appium tests (all types):
appium_version: The Appium version. Currently supported values are '1.4.16', '1.6.3', 'latest', and 'default'.
latest will run the latest Appium version supported by Device Farm (1.6.3).
For default , Device Farm will choose a compatible version of Appium for the device. The current behavior is to run 1.4.16 on Android devices and iOS 9 and earlier, 1.6.3 for iOS 10 and later.
This behavior is subject to change.
For Fuzz tests (Android only):
event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.
throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.
seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.
For Explorer tests:
username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.
password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.
For Instrumentation:
filter: A test filter string. Examples:
Running a single test case: 'com.android.abc.Test1'
Running a single test: 'com.android.abc.Test1#smoke'
Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'
For XCTest and XCTestUI:
filter: A test filter string. Examples:
Running a single test class: 'LoginTests'
Running a multiple test classes: 'LoginTests,SmokeTests'
Running a single test: 'LoginTests/testValid'
Running multiple tests: 'LoginTests/testValid,LoginTests/testInvalid'
For UIAutomator:
filter: A test filter string. Examples:
Running a single test case: 'com.android.abc.Test1'
Running a single test: 'com.android.abc.Test1#smoke'
Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'
(string) --
(string) --
:rtype: dict
:return: {
'compatibleDevices': [
{
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'compatible': True|False,
'incompatibilityMessages': [
{
'message': 'string',
'type': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION'
},
]
},
],
'incompatibleDevices': [
{
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'compatible': True|False,
'incompatibilityMessages': [
{
'message': 'string',
'type': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION'
},
]
},
]
}
:returns:
PHONE: The phone form factor.
TABLET: The tablet form factor.
"""
pass
def get_job(arn=None):
"""
Gets information about a job.
See also: AWS API Documentation
Examples
The following example returns information about a specific job.
Expected Output:
:example: response = client.get_job(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The job's ARN.
:rtype: dict
:return: {
'job': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
}
}
:returns:
PENDING: A pending status.
PENDING_CONCURRENCY: A pending concurrency status.
PENDING_DEVICE: A pending device status.
PROCESSING: A processing status.
SCHEDULING: A scheduling status.
PREPARING: A preparing status.
RUNNING: A running status.
COMPLETED: A completed status.
STOPPING: A stopping status.
"""
pass
def get_network_profile(arn=None):
"""
Returns information about a network profile.
See also: AWS API Documentation
:example: response = client.get_network_profile(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the network profile you want to return information about.
:rtype: dict
:return: {
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
"""
pass
def get_offering_status(nextToken=None):
"""
Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
Examples
The following example returns information about Device Farm offerings available to your account.
Expected Output:
:example: response = client.get_offering_status(
nextToken='string'
)
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'current': {
'string': {
'type': 'PURCHASE'|'RENEW'|'SYSTEM',
'offering': {
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
'quantity': 123,
'effectiveOn': datetime(2015, 1, 1)
}
},
'nextPeriod': {
'string': {
'type': 'PURCHASE'|'RENEW'|'SYSTEM',
'offering': {
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
'quantity': 123,
'effectiveOn': datetime(2015, 1, 1)
}
},
'nextToken': 'string'
}
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
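# Hedged continuation of the can_paginate sketch above: iterating the pages a
# paginator yields. The 'list_projects' operation and the 'projects' result key
# are assumptions based on the Device Farm ListProjects operation.
#
#   paginator = client.get_paginator('list_projects')
#   for page in paginator.paginate():
#       for project in page.get('projects', []):
#           print(project['name'])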
def get_project(arn=None):
"""
Gets information about a project.
See also: AWS API Documentation
Examples
The following example gets information about a specific project.
Expected Output:
:example: response = client.get_project(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The project's ARN.
:rtype: dict
:return: {
'project': {
'arn': 'string',
'name': 'string',
'defaultJobTimeoutMinutes': 123,
'created': datetime(2015, 1, 1)
}
}
"""
pass
def get_remote_access_session(arn=None):
"""
Returns a link to a currently running remote access session.
See also: AWS API Documentation
Examples
The following example gets a specific remote access session.
Expected Output:
:example: response = client.get_remote_access_session(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the remote access session about which you want to get session information.
:rtype: dict
:return: {
'remoteAccessSession': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'message': 'string',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'endpoint': 'string'
}
}
:returns:
PENDING: A pending condition.
PASSED: A passing condition.
WARNED: A warning condition.
FAILED: A failed condition.
SKIPPED: A skipped condition.
ERRORED: An error condition.
STOPPED: A stopped condition.
"""
pass
def get_run(arn=None):
"""
Gets information about a run.
See also: AWS API Documentation
Examples
The following example gets information about a specific test run.
Expected Output:
:example: response = client.get_run(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The run's ARN.
:rtype: dict
:return: {
'run': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'platform': 'ANDROID'|'IOS',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'totalJobs': 123,
'completedJobs': 123,
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
}
:returns:
ANDROID: The Android platform.
IOS: The iOS platform.
"""
pass
def get_suite(arn=None):
"""
Gets information about a suite.
See also: AWS API Documentation
Examples
The following example gets information about a specific test suite.
Expected Output:
:example: response = client.get_suite(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The suite's ARN.
:rtype: dict
:return: {
'suite': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
}
}
:returns:
PENDING: A pending status.
PENDING_CONCURRENCY: A pending concurrency status.
PENDING_DEVICE: A pending device status.
PROCESSING: A processing status.
SCHEDULING: A scheduling status.
PREPARING: A preparing status.
RUNNING: A running status.
COMPLETED: A completed status.
STOPPING: A stopping status.
"""
pass
def get_test(arn=None):
"""
Gets information about a test.
See also: AWS API Documentation
Examples
The following example gets information about a specific test.
Expected Output:
:example: response = client.get_test(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The test's ARN.
:rtype: dict
:return: {
'test': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
}
}
:returns:
PENDING: A pending status.
PENDING_CONCURRENCY: A pending concurrency status.
PENDING_DEVICE: A pending device status.
PROCESSING: A processing status.
SCHEDULING: A scheduling status.
PREPARING: A preparing status.
RUNNING: A running status.
COMPLETED: A completed status.
STOPPING: A stopping status.
"""
pass
def get_upload(arn=None):
"""
Gets information about an upload.
See also: AWS API Documentation
Examples
The following example gets information about a specific upload.
Expected Output:
:example: response = client.get_upload(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The upload's ARN.
:rtype: dict
:return: {
'upload': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE',
'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',
'url': 'string',
'metadata': 'string',
'contentType': 'string',
'message': 'string'
}
}
:returns:
FAILED: A failed status.
INITIALIZED: An initialized status.
PROCESSING: A processing status.
SUCCEEDED: A succeeded status.
"""
pass
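# Hedged sketch: waits for an upload to finish server-side processing. The
# `upload_arn` is assumed to come from a prior create_upload call; the status
# values mirror the enum documented above (INITIALIZED/PROCESSING/SUCCEEDED/FAILED).
def _example_wait_for_upload(upload_arn, poll_seconds=10):
    import time
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    while True:
        upload = client.get_upload(arn=upload_arn)['upload']
        if upload['status'] in ('SUCCEEDED', 'FAILED'):
            return upload
        time.sleep(poll_seconds)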
def get_waiter():
"""
"""
pass
def install_to_remote_access_session(remoteAccessSessionArn=None, appArn=None):
"""
Installs an application to the device in a remote access session. For Android applications, the file must be in .apk format. For iOS applications, the file must be in .ipa format.
See also: AWS API Documentation
Examples
The following example installs a specific app to a device in a specific remote access session.
Expected Output:
:example: response = client.install_to_remote_access_session(
remoteAccessSessionArn='string',
appArn='string'
)
:type remoteAccessSessionArn: string
:param remoteAccessSessionArn: [REQUIRED]
The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.
:type appArn: string
:param appArn: [REQUIRED]
The Amazon Resource Name (ARN) of the app about which you are requesting information.
:rtype: dict
:return: {
'appUpload': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE',
'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',
'url': 'string',
'metadata': 'string',
'contentType': 'string',
'message': 'string'
}
}
:returns:
ANDROID_APP: An Android upload.
IOS_APP: An iOS upload.
WEB_APP: A web application upload.
EXTERNAL_DATA: An external data upload.
APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
CALABASH_TEST_PACKAGE: A Calabash test package upload.
INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.
UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.
UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.
XCTEST_TEST_PACKAGE: An XCode test package upload.
XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.
"""
pass
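# Hedged sketch: installs an already-uploaded .apk/.ipa into a live remote access
# session. Both ARNs are placeholders you would obtain from other calls (the
# session from a create/list remote access session call, the app from an upload).
def _example_install_app(session_arn, app_upload_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    response = client.install_to_remote_access_session(
        remoteAccessSessionArn=session_arn,
        appArn=app_upload_arn
    )
    return response['appUpload']['status']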
def list_artifacts(arn=None, type=None, nextToken=None):
"""
Gets information about artifacts.
See also: AWS API Documentation
Examples
The following example lists screenshot artifacts for a specific run.
Expected Output:
:example: response = client.list_artifacts(
arn='string',
type='SCREENSHOT'|'FILE'|'LOG',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Run, Job, Suite, or Test ARN.
:type type: string
:param type: [REQUIRED]
The artifacts' type.
Allowed values include:
FILE: The artifacts are files.
LOG: The artifacts are logs.
SCREENSHOT: The artifacts are screenshots.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'artifacts': [
{
'arn': 'string',
'name': 'string',
'type': 'UNKNOWN'|'SCREENSHOT'|'DEVICE_LOG'|'MESSAGE_LOG'|'VIDEO_LOG'|'RESULT_LOG'|'SERVICE_LOG'|'WEBKIT_LOG'|'INSTRUMENTATION_OUTPUT'|'EXERCISER_MONKEY_OUTPUT'|'CALABASH_JSON_OUTPUT'|'CALABASH_PRETTY_OUTPUT'|'CALABASH_STANDARD_OUTPUT'|'CALABASH_JAVA_XML_OUTPUT'|'AUTOMATION_OUTPUT'|'APPIUM_SERVER_OUTPUT'|'APPIUM_JAVA_OUTPUT'|'APPIUM_JAVA_XML_OUTPUT'|'APPIUM_PYTHON_OUTPUT'|'APPIUM_PYTHON_XML_OUTPUT'|'EXPLORER_EVENT_LOG'|'EXPLORER_SUMMARY_LOG'|'APPLICATION_CRASH_REPORT'|'XCTEST_LOG'|'VIDEO',
'extension': 'string',
'url': 'string'
},
],
'nextToken': 'string'
}
:returns:
UNKNOWN: An unknown type.
SCREENSHOT: The screenshot type.
DEVICE_LOG: The device log type.
MESSAGE_LOG: The message log type.
RESULT_LOG: The result log type.
SERVICE_LOG: The service log type.
WEBKIT_LOG: The web kit log type.
INSTRUMENTATION_OUTPUT: The instrumentation type.
EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.
CALABASH_JSON_OUTPUT: The Calabash JSON output type.
CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.
CALABASH_STANDARD_OUTPUT: The Calabash standard output type.
CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.
AUTOMATION_OUTPUT: The automation output type.
APPIUM_SERVER_OUTPUT: The Appium server output type.
APPIUM_JAVA_OUTPUT: The Appium Java output type.
APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.
APPIUM_PYTHON_OUTPUT: The Appium Python output type.
APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.
EXPLORER_EVENT_LOG: The Explorer event log output type.
EXPLORER_SUMMARY_LOG: The Explorer summary log output type.
APPLICATION_CRASH_REPORT: The application crash report output type.
XCTEST_LOG: The XCode test output type.
"""
pass
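# Hedged sketch: pages through every FILE artifact of a run and downloads each
# pre-signed URL with the standard library. The output directory is an
# illustrative choice, and artifact names are not sanitised for the filesystem.
def _example_download_artifacts(run_arn, out_dir='artifacts'):
    import os
    import boto3
    import urllib.request
    client = boto3.client('devicefarm', region_name='us-west-2')
    os.makedirs(out_dir, exist_ok=True)
    token = None
    while True:
        kwargs = {'arn': run_arn, 'type': 'FILE'}
        if token:
            kwargs['nextToken'] = token
        page = client.list_artifacts(**kwargs)
        for artifact in page['artifacts']:
            filename = '%s.%s' % (artifact['name'], artifact['extension'])
            urllib.request.urlretrieve(artifact['url'], os.path.join(out_dir, filename))
        token = page.get('nextToken')
        if not token:
            return out_dir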
def list_device_pools(arn=None, type=None, nextToken=None):
"""
Gets information about device pools.
See also: AWS API Documentation
Examples
The following example returns information about the private device pools in a specific project.
Expected Output:
:example: response = client.list_device_pools(
arn='string',
type='CURATED'|'PRIVATE',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The project ARN.
:type type: string
:param type: The device pools' type.
Allowed values include:
CURATED: A device pool that is created and managed by AWS Device Farm.
PRIVATE: A device pool that is created and managed by the device pool developer.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'devicePools': [
{
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'rules': [
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
},
],
'nextToken': 'string'
}
:returns:
CURATED: A device pool that is created and managed by AWS Device Farm.
PRIVATE: A device pool that is created and managed by the device pool developer.
"""
pass
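# Hedged sketch: collects only the PRIVATE pools of a project, following the
# pagination token until it is exhausted.
def _example_private_pools(project_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    pools, token = [], None
    while True:
        kwargs = {'arn': project_arn, 'type': 'PRIVATE'}
        if token:
            kwargs['nextToken'] = token
        page = client.list_device_pools(**kwargs)
        pools.extend(page['devicePools'])
        token = page.get('nextToken')
        if not token:
            return pools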
def list_devices(arn=None, nextToken=None):
"""
Gets information about unique device types.
See also: AWS API Documentation
Examples
The following example returns information about the available devices in a specific project.
Expected Output:
:example: response = client.list_devices(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: The Amazon Resource Name (ARN) of the project.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'devices': [
{
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
],
'nextToken': 'string'
}
:returns:
PHONE: The phone form factor.
TABLET: The tablet form factor.
"""
pass
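# Hedged sketch: client-side filter of the device catalog for Android phones
# that support remote access. The filtering keys mirror the response shape
# documented above.
def _example_android_phones():
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    matches, token = [], None
    while True:
        kwargs = {'nextToken': token} if token else {}
        page = client.list_devices(**kwargs)
        matches.extend(
            d for d in page['devices']
            if d['platform'] == 'ANDROID'
            and d['formFactor'] == 'PHONE'
            and d.get('remoteAccessEnabled')
        )
        token = page.get('nextToken')
        if not token:
            return matches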
def list_jobs(arn=None, nextToken=None):
"""
Gets information about jobs.
See also: AWS API Documentation
Examples
The following example returns information about jobs in a specific project.
Expected Output:
:example: response = client.list_jobs(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The jobs' ARNs.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'jobs': [
{
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
},
],
'nextToken': 'string'
}
:returns:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
"""
pass
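# Hedged sketch: prints a one-line result summary per device for a run, paging
# through list_jobs with the run's ARN.
def _example_job_summary(run_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    token = None
    while True:
        kwargs = {'arn': run_arn}
        if token:
            kwargs['nextToken'] = token
        page = client.list_jobs(**kwargs)
        for job in page['jobs']:
            print('%s on %s: %s' % (job['name'], job['device']['name'], job['result']))
        token = page.get('nextToken')
        if not token:
            return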
def list_network_profiles(arn=None, type=None, nextToken=None):
"""
Returns the list of available network profiles.
See also: AWS API Documentation
:example: response = client.list_network_profiles(
arn='string',
type='CURATED'|'PRIVATE',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to list network profiles.
:type type: string
:param type: The type of network profile you wish to return information about. Valid values are listed below.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'networkProfiles': [
{
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
},
],
'nextToken': 'string'
}
"""
pass
def list_offering_promotions(nextToken=None):
"""
Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a NotEligible error if the caller is not permitted to invoke the operation. Contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
:example: response = client.list_offering_promotions(
nextToken='string'
)
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'offeringPromotions': [
{
'id': 'string',
'description': 'string'
},
],
'nextToken': 'string'
}
"""
pass
def list_offering_transactions(nextToken=None):
"""
Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
Examples
The following example returns information about Device Farm offering transactions.
Expected Output:
:example: response = client.list_offering_transactions(
nextToken='string'
)
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'offeringTransactions': [
{
'offeringStatus': {
'type': 'PURCHASE'|'RENEW'|'SYSTEM',
'offering': {
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
'quantity': 123,
'effectiveOn': datetime(2015, 1, 1)
},
'transactionId': 'string',
'offeringPromotionId': 'string',
'createdOn': datetime(2015, 1, 1),
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
}
},
],
'nextToken': 'string'
}
"""
pass
def list_offerings(nextToken=None):
"""
Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
Examples
The following example returns information about available device offerings.
Expected Output:
:example: response = client.list_offerings(
nextToken='string'
)
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'offerings': [
{
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
],
'nextToken': 'string'
}
"""
pass
def list_projects(arn=None, nextToken=None):
"""
Gets information about projects.
See also: AWS API Documentation
Examples
The following example returns information about the specified project in Device Farm.
Expected Output:
:example: response = client.list_projects(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: Optional. If no Amazon Resource Name (ARN) is specified, then AWS Device Farm returns a list of all projects for the AWS account. You can also specify a project ARN.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'projects': [
{
'arn': 'string',
'name': 'string',
'defaultJobTimeoutMinutes': 123,
'created': datetime(2015, 1, 1)
},
],
'nextToken': 'string'
}
"""
pass
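# Hedged sketch: collects every project in the account by omitting the optional
# ARN and following nextToken until the listing is exhausted.
def _example_all_projects():
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    projects, token = [], None
    while True:
        kwargs = {'nextToken': token} if token else {}
        page = client.list_projects(**kwargs)
        projects.extend(page['projects'])
        token = page.get('nextToken')
        if not token:
            return projects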
def list_remote_access_sessions(arn=None, nextToken=None):
"""
Returns a list of all currently running remote access sessions.
See also: AWS API Documentation
Examples
The following example returns information about a specific Device Farm remote access session.
Expected Output:
:example: response = client.list_remote_access_sessions(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'remoteAccessSessions': [
{
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'message': 'string',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'endpoint': 'string'
},
],
'nextToken': 'string'
}
:returns:
PENDING: A pending status.
PENDING_CONCURRENCY: A pending concurrency status.
PENDING_DEVICE: A pending device status.
PROCESSING: A processing status.
SCHEDULING: A scheduling status.
PREPARING: A preparing status.
RUNNING: A running status.
COMPLETED: A completed status.
STOPPING: A stopping status.
"""
pass
def list_runs(arn=None, nextToken=None):
"""
Gets information about runs, given an AWS Device Farm project ARN.
See also: AWS API Documentation
Examples
The following example returns information about a specific test run.
Expected Output:
:example: response = client.list_runs(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to list runs.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'runs': [
{
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'platform': 'ANDROID'|'IOS',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'totalJobs': 123,
'completedJobs': 123,
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
},
],
'nextToken': 'string'
}
:returns:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
"""
pass
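# Hedged sketch: prints pass/fail counters for the first page of runs in a
# project. Assumes the service returns the most recent runs first, which is the
# usual behaviour but not guaranteed here.
def _example_recent_runs(project_arn, limit=10):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    runs = client.list_runs(arn=project_arn)['runs'][:limit]
    for run in runs:
        counters = run['counters']
        print('%s: %s (%d passed / %d failed)' % (
            run['name'], run['result'], counters['passed'], counters['failed']))
    return runs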
def list_samples(arn=None, nextToken=None):
"""
Gets information about samples, given an AWS Device Farm project ARN.
See also: AWS API Documentation
Examples
The following example returns information about samples, given a specific Device Farm project.
Expected Output:
:example: response = client.list_samples(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to list samples.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'samples': [
{
'arn': 'string',
'type': 'CPU'|'MEMORY'|'THREADS'|'RX_RATE'|'TX_RATE'|'RX'|'TX'|'NATIVE_FRAMES'|'NATIVE_FPS'|'NATIVE_MIN_DRAWTIME'|'NATIVE_AVG_DRAWTIME'|'NATIVE_MAX_DRAWTIME'|'OPENGL_FRAMES'|'OPENGL_FPS'|'OPENGL_MIN_DRAWTIME'|'OPENGL_AVG_DRAWTIME'|'OPENGL_MAX_DRAWTIME',
'url': 'string'
},
],
'nextToken': 'string'
}
:returns:
CPU: A CPU sample type. This is expressed as the app processing CPU time (including child processes) as reported by process, as a percentage.
MEMORY: A memory usage sample type. This is expressed as the total proportional set size of an app process, in kilobytes.
NATIVE_AVG_DRAWTIME
NATIVE_FPS
NATIVE_FRAMES
NATIVE_MAX_DRAWTIME
NATIVE_MIN_DRAWTIME
OPENGL_AVG_DRAWTIME
OPENGL_FPS
OPENGL_FRAMES
OPENGL_MAX_DRAWTIME
OPENGL_MIN_DRAWTIME
RX
RX_RATE: The total number of bytes per second (TCP and UDP) that are received, by app process.
THREADS: A threads sample type. This is expressed as the total number of threads per app process.
TX
TX_RATE: The total number of bytes per second (TCP and UDP) that are sent, by app process.
"""
pass
def list_suites(arn=None, nextToken=None):
"""
Gets information about suites.
See also: AWS API Documentation
Examples
The following example returns information about suites, given a specific Device Farm project.
Expected Output:
:example: response = client.list_suites(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The suites' ARNs.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'suites': [
{
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
},
],
'nextToken': 'string'
}
:returns:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
"""
pass
def list_tests(arn=None, nextToken=None):
"""
Gets information about tests.
See also: AWS API Documentation
Examples
The following example returns information about tests, given a specific Device Farm project.
Expected Output:
:example: response = client.list_tests(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The tests' ARNs.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'tests': [
{
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
}
},
],
'nextToken': 'string'
}
:returns:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
"""
pass
def list_unique_problems(arn=None, nextToken=None):
"""
Gets information about unique problems.
See also: AWS API Documentation
Examples
The following example returns information about unique problems, given a specific Device Farm project.
Expected Output:
:example: response = client.list_unique_problems(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The unique problems' ARNs.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'uniqueProblems': {
'string': [
{
'message': 'string',
'problems': [
{
'run': {
'arn': 'string',
'name': 'string'
},
'job': {
'arn': 'string',
'name': 'string'
},
'suite': {
'arn': 'string',
'name': 'string'
},
'test': {
'arn': 'string',
'name': 'string'
},
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'message': 'string'
},
]
},
]
},
'nextToken': 'string'
}
:returns:
PENDING: A pending condition.
PASSED: A passing condition.
WARNED: A warning condition.
FAILED: A failed condition.
SKIPPED: A skipped condition.
ERRORED: An error condition.
STOPPED: A stopped condition.
"""
pass
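# Hedged sketch: flattens the per-result grouping of unique problems into
# (result, message, test name) tuples for quick triage. The nesting mirrors the
# response shape documented above.
def _example_triage_problems(run_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    rows = []
    grouped = client.list_unique_problems(arn=run_arn)['uniqueProblems']
    for result, unique_problems in grouped.items():
        for unique in unique_problems:
            for problem in unique['problems']:
                rows.append((result, unique['message'], problem['test']['name']))
    return rows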
def list_uploads(arn=None, nextToken=None):
"""
Gets information about uploads, given an AWS Device Farm project ARN.
See also: AWS API Documentation
Examples
The following example returns information about uploads, given a specific Device Farm project.
Expected Output:
:example: response = client.list_uploads(
arn='string',
nextToken='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you want to list uploads.
:type nextToken: string
:param nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
:rtype: dict
:return: {
'uploads': [
{
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'type': 'ANDROID_APP'|'IOS_APP'|'WEB_APP'|'EXTERNAL_DATA'|'APPIUM_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_PYTHON_TEST_PACKAGE'|'APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE'|'APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE'|'APPIUM_WEB_PYTHON_TEST_PACKAGE'|'CALABASH_TEST_PACKAGE'|'INSTRUMENTATION_TEST_PACKAGE'|'UIAUTOMATION_TEST_PACKAGE'|'UIAUTOMATOR_TEST_PACKAGE'|'XCTEST_TEST_PACKAGE'|'XCTEST_UI_TEST_PACKAGE',
'status': 'INITIALIZED'|'PROCESSING'|'SUCCEEDED'|'FAILED',
'url': 'string',
'metadata': 'string',
'contentType': 'string',
'message': 'string'
},
],
'nextToken': 'string'
}
:returns:
ANDROID_APP: An Android upload.
IOS_APP: An iOS upload.
WEB_APP: A web application upload.
EXTERNAL_DATA: An external data upload.
APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
CALABASH_TEST_PACKAGE: A Calabash test package upload.
INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.
UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.
UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.
XCTEST_TEST_PACKAGE: An XCode test package upload.
XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.
"""
pass
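# Hedged sketch: picks the most recently created, successfully processed
# ANDROID_APP upload of a project, or None if there is none on the first page.
def _example_latest_android_app(project_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    uploads = client.list_uploads(arn=project_arn)['uploads']
    apps = [u for u in uploads
            if u['type'] == 'ANDROID_APP' and u['status'] == 'SUCCEEDED']
    return max(apps, key=lambda u: u['created']) if apps else None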
def purchase_offering(offeringId=None, quantity=None, offeringPromotionId=None):
"""
Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
Examples
The following example purchases a specific device slot offering.
Expected Output:
:example: response = client.purchase_offering(
offeringId='string',
quantity=123,
offeringPromotionId='string'
)
:type offeringId: string
:param offeringId: The ID of the offering.
:type quantity: integer
:param quantity: The number of device slots you wish to purchase in an offering request.
:type offeringPromotionId: string
:param offeringPromotionId: The ID of the offering promotion to be applied to the purchase.
:rtype: dict
:return: {
'offeringTransaction': {
'offeringStatus': {
'type': 'PURCHASE'|'RENEW'|'SYSTEM',
'offering': {
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
'quantity': 123,
'effectiveOn': datetime(2015, 1, 1)
},
'transactionId': 'string',
'offeringPromotionId': 'string',
'createdOn': datetime(2015, 1, 1),
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
}
}
}
"""
pass
def renew_offering(offeringId=None, quantity=None):
"""
Explicitly sets the quantity of devices to renew for an offering, starting from the effectiveDate of the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
See also: AWS API Documentation
Examples
The following example renews a specific device slot offering.
Expected Output:
:example: response = client.renew_offering(
offeringId='string',
quantity=123
)
:type offeringId: string
:param offeringId: The ID of a request to renew an offering.
:type quantity: integer
:param quantity: The quantity requested in an offering renewal.
:rtype: dict
:return: {
'offeringTransaction': {
'offeringStatus': {
'type': 'PURCHASE'|'RENEW'|'SYSTEM',
'offering': {
'id': 'string',
'description': 'string',
'type': 'RECURRING',
'platform': 'ANDROID'|'IOS',
'recurringCharges': [
{
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
},
'frequency': 'MONTHLY'
},
]
},
'quantity': 123,
'effectiveOn': datetime(2015, 1, 1)
},
'transactionId': 'string',
'offeringPromotionId': 'string',
'createdOn': datetime(2015, 1, 1),
'cost': {
'amount': 123.0,
'currencyCode': 'USD'
}
}
}
"""
pass
def schedule_run(projectArn=None, appArn=None, devicePoolArn=None, name=None, test=None, configuration=None, executionConfiguration=None):
"""
Schedules a run.
See also: AWS API Documentation
Examples
The following example schedules a test run named MyRun.
Expected Output:
:example: response = client.schedule_run(
projectArn='string',
appArn='string',
devicePoolArn='string',
name='string',
test={
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'testPackageArn': 'string',
'filter': 'string',
'parameters': {
'string': 'string'
}
},
configuration={
'extraDataPackageArn': 'string',
'networkProfileArn': 'string',
'locale': 'string',
'location': {
'latitude': 123.0,
'longitude': 123.0
},
'radios': {
'wifi': True|False,
'bluetooth': True|False,
'nfc': True|False,
'gps': True|False
},
'auxiliaryApps': [
'string',
],
'billingMethod': 'METERED'|'UNMETERED'
},
executionConfiguration={
'jobTimeoutMinutes': 123,
'accountsCleanup': True|False,
'appPackagesCleanup': True|False
}
)
:type projectArn: string
:param projectArn: [REQUIRED]
The ARN of the project for the run to be scheduled.
:type appArn: string
:param appArn: The ARN of the app to schedule a run.
:type devicePoolArn: string
:param devicePoolArn: [REQUIRED]
The ARN of the device pool for the run to be scheduled.
:type name: string
:param name: The name for the run to be scheduled.
:type test: dict
:param test: [REQUIRED]
Information about the test for the run to be scheduled.
type (string) -- [REQUIRED]The test's type.
Must be one of the following values:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
testPackageArn (string) --The ARN of the uploaded test that will be run.
filter (string) --The test's filter.
parameters (dict) --The test's parameters, such as the following test framework parameters and fixture settings:
For Calabash tests:
profile: A cucumber profile, for example, 'my_profile_name'.
tags: You can limit execution to features or scenarios that have (or don't have) certain tags, for example, '@smoke' or '@smoke,~@wip'.
For Appium tests (all types):
appium_version: The Appium version. Currently supported values are '1.4.16', '1.6.3', 'latest', and 'default'.
latest will run the latest Appium version supported by Device Farm (1.6.3).
For default, Device Farm will choose a compatible version of Appium for the device. The current behavior is to run 1.4.16 on Android devices and iOS 9 and earlier, 1.6.3 for iOS 10 and later.
This behavior is subject to change.
For Fuzz tests (Android only):
event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.
throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.
seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.
For Explorer tests:
username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.
password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.
For Instrumentation:
filter: A test filter string. Examples:
Running a single test case: 'com.android.abc.Test1'
Running a single test: 'com.android.abc.Test1#smoke'
Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'
For XCTest and XCTestUI:
filter: A test filter string. Examples:
Running a single test class: 'LoginTests'
Running multiple test classes: 'LoginTests,SmokeTests'
Running a single test: 'LoginTests/testValid'
Running multiple tests: 'LoginTests/testValid,LoginTests/testInvalid'
For UIAutomator:
filter: A test filter string. Examples:
Running a single test case: 'com.android.abc.Test1'
Running a single test: 'com.android.abc.Test1#smoke'
Running multiple tests: 'com.android.abc.Test1,com.android.abc.Test2'
(string) --
(string) --
:type configuration: dict
:param configuration: Information about the settings for the run to be scheduled.
extraDataPackageArn (string) --The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.
networkProfileArn (string) --Reserved for internal use.
locale (string) --Information about the locale that is used for the run.
location (dict) --Information about the location that is used for the run.
latitude (float) -- [REQUIRED]The latitude.
longitude (float) -- [REQUIRED]The longitude.
radios (dict) --Information about the radio states for the run.
wifi (boolean) --True if Wi-Fi is enabled at the beginning of the test; otherwise, false.
bluetooth (boolean) --True if Bluetooth is enabled at the beginning of the test; otherwise, false.
nfc (boolean) --True if NFC is enabled at the beginning of the test; otherwise, false.
gps (boolean) --True if GPS is enabled at the beginning of the test; otherwise, false.
auxiliaryApps (list) --A list of auxiliary apps for the run.
(string) --
billingMethod (string) --Specifies the billing method for a test run: metered or unmetered . If the parameter is not specified, the default value is metered .
:type executionConfiguration: dict
:param executionConfiguration: Specifies configuration information about a test run, such as the execution timeout (in minutes).
jobTimeoutMinutes (integer) --The number of minutes a test run will execute before it times out.
accountsCleanup (boolean) --True if account cleanup is enabled at the beginning of the test; otherwise, false.
appPackagesCleanup (boolean) --True if app package cleanup is enabled at the beginning of the test; otherwise, false.
:rtype: dict
:return: {
'run': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'platform': 'ANDROID'|'IOS',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'totalJobs': 123,
'completedJobs': 123,
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
}
:returns:
BUILTIN_FUZZ: The built-in fuzz type.
BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.
APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
APPIUM_PYTHON: The Appium Python type.
APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
CALABASH: The Calabash type.
INSTRUMENTATION: The Instrumentation type.
UIAUTOMATION: The uiautomation type.
UIAUTOMATOR: The uiautomator type.
XCTEST: The XCode test type.
XCTEST_UI: The XCode UI test type.
"""
pass
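# Hedged sketch: schedules an Appium Python run with an explicit Appium version,
# mirroring the test dict documented above. All ARNs are placeholders you would
# obtain from project, upload, and device pool calls; the run name and timeout
# are illustrative.
def _example_schedule_appium_run(project_arn, app_arn, pool_arn, test_pkg_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    response = client.schedule_run(
        projectArn=project_arn,
        appArn=app_arn,
        devicePoolArn=pool_arn,
        name='MyRun',
        test={
            'type': 'APPIUM_PYTHON',
            'testPackageArn': test_pkg_arn,
            'parameters': {'appium_version': '1.6.3'}
        },
        executionConfiguration={'jobTimeoutMinutes': 60}
    )
    return response['run']['arn']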
def stop_remote_access_session(arn=None):
"""
Ends a specified remote access session.
See also: AWS API Documentation
:example: response = client.stop_remote_access_session(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the remote access session you wish to stop.
:rtype: dict
:return: {
'remoteAccessSession': {
'arn': 'string',
'name': 'string',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'message': 'string',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'device': {
'arn': 'string',
'name': 'string',
'manufacturer': 'string',
'model': 'string',
'formFactor': 'PHONE'|'TABLET',
'platform': 'ANDROID'|'IOS',
'os': 'string',
'cpu': {
'frequency': 'string',
'architecture': 'string',
'clock': 123.0
},
'resolution': {
'width': 123,
'height': 123
},
'heapSize': 123,
'memory': 123,
'image': 'string',
'carrier': 'string',
'radio': 'string',
'remoteAccessEnabled': True|False,
'fleetType': 'string',
'fleetName': 'string'
},
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'endpoint': 'string'
}
}
:returns:
PENDING: A pending condition.
PASSED: A passing condition.
WARNED: A warning condition.
FAILED: A failed condition.
SKIPPED: A skipped condition.
ERRORED: An error condition.
STOPPED: A stopped condition.
"""
pass
def stop_run(arn=None):
"""
Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.
See also: AWS API Documentation
Examples
The following example stops a specific test run.
Expected Output:
:example: response = client.stop_run(
arn='string'
)
:type arn: string
:param arn: [REQUIRED]
Represents the Amazon Resource Name (ARN) of the Device Farm run you wish to stop.
:rtype: dict
:return: {
'run': {
'arn': 'string',
'name': 'string',
'type': 'BUILTIN_FUZZ'|'BUILTIN_EXPLORER'|'APPIUM_JAVA_JUNIT'|'APPIUM_JAVA_TESTNG'|'APPIUM_PYTHON'|'APPIUM_WEB_JAVA_JUNIT'|'APPIUM_WEB_JAVA_TESTNG'|'APPIUM_WEB_PYTHON'|'CALABASH'|'INSTRUMENTATION'|'UIAUTOMATION'|'UIAUTOMATOR'|'XCTEST'|'XCTEST_UI',
'platform': 'ANDROID'|'IOS',
'created': datetime(2015, 1, 1),
'status': 'PENDING'|'PENDING_CONCURRENCY'|'PENDING_DEVICE'|'PROCESSING'|'SCHEDULING'|'PREPARING'|'RUNNING'|'COMPLETED'|'STOPPING',
'result': 'PENDING'|'PASSED'|'WARNED'|'FAILED'|'SKIPPED'|'ERRORED'|'STOPPED',
'started': datetime(2015, 1, 1),
'stopped': datetime(2015, 1, 1),
'counters': {
'total': 123,
'passed': 123,
'failed': 123,
'warned': 123,
'errored': 123,
'stopped': 123,
'skipped': 123
},
'message': 'string',
'totalJobs': 123,
'completedJobs': 123,
'billingMethod': 'METERED'|'UNMETERED',
'deviceMinutes': {
'total': 123.0,
'metered': 123.0,
'unmetered': 123.0
},
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
}
:returns:
ANDROID: The Android platform.
IOS: The iOS platform.
"""
pass
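# Hedged sketch: issues a stop request for every run of a project that is still
# in the RUNNING state and returns the ARNs that were stopped.
def _example_stop_running(project_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    stopped = []
    for run in client.list_runs(arn=project_arn)['runs']:
        if run['status'] == 'RUNNING':
            stopped.append(client.stop_run(arn=run['arn'])['run']['arn'])
    return stopped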
def update_device_pool(arn=None, name=None, description=None, rules=None):
"""
Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).
See also: AWS API Documentation
Examples
The following example updates the specified device pool with a new name and description. It also enables remote access of devices in the device pool.
Expected Output:
:example: response = client.update_device_pool(
arn='string',
name='string',
description='string',
rules=[
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.
:type name: string
:param name: A string representing the name of the device pool you wish to update.
:type description: string
:param description: A description of the device pool you wish to update.
:type rules: list
:param rules: Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.
(dict) --Represents a condition for a device pool.
attribute (string) --The rule's stringified attribute. For example, specify the value as '\'abc\'' .
Allowed values include:
ARN: The ARN.
FORM_FACTOR: The form factor (for example, phone or tablet).
MANUFACTURER: The manufacturer.
PLATFORM: The platform (for example, Android or iOS).
REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.
APPIUM_VERSION: The Appium version for the test.
operator (string) --The rule's operator.
EQUALS: The equals operator.
GREATER_THAN: The greater-than operator.
IN: The in operator.
LESS_THAN: The less-than operator.
NOT_IN: The not-in operator.
CONTAINS: The contains operator.
value (string) --The rule's value.
:rtype: dict
:return: {
'devicePool': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'rules': [
{
'attribute': 'ARN'|'PLATFORM'|'FORM_FACTOR'|'MANUFACTURER'|'REMOTE_ACCESS_ENABLED'|'APPIUM_VERSION',
'operator': 'EQUALS'|'LESS_THAN'|'GREATER_THAN'|'IN'|'NOT_IN'|'CONTAINS',
'value': 'string'
},
]
}
}
:returns:
CURATED: A device pool that is created and managed by AWS Device Farm.
PRIVATE: A device pool that is created and managed by the device pool developer.
"""
pass
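# Hedged sketch: replaces a pool's rules with a single platform rule. As noted
# above, rule updates are all-or-nothing, so this wipes any existing rules; the
# stringified value follows the quoting convention from the rule documentation.
def _example_restrict_pool_to_android(pool_arn):
    import boto3
    client = boto3.client('devicefarm', region_name='us-west-2')
    response = client.update_device_pool(
        arn=pool_arn,
        rules=[{'attribute': 'PLATFORM', 'operator': 'EQUALS', 'value': '"ANDROID"'}]
    )
    return response['devicePool']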
def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):
"""
Updates the network profile with specific settings.
See also: AWS API Documentation
:example: response = client.update_network_profile(
arn='string',
name='string',
description='string',
type='CURATED'|'PRIVATE',
uplinkBandwidthBits=123,
downlinkBandwidthBits=123,
uplinkDelayMs=123,
downlinkDelayMs=123,
uplinkJitterMs=123,
downlinkJitterMs=123,
uplinkLossPercent=123,
downlinkLossPercent=123
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project for which you wish to update network profile settings.
:type name: string
:param name: The name of the network profile about which you are returning information.
:type description: string
:param description: The descriptoin of the network profile about which you are returning information.
:type type: string
:param type: The type of network profile you wish to return information about. Valid values are listed below.
:type uplinkBandwidthBits: integer
:param uplinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.
:type downlinkBandwidthBits: integer
:param downlinkBandwidthBits: The data throughput rate in bits per second, as an integer from 0 to 104857600.
:type uplinkDelayMs: integer
:param uplinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.
:type downlinkDelayMs: integer
:param downlinkDelayMs: Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.
:type uplinkJitterMs: integer
:param uplinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.
:type downlinkJitterMs: integer
:param downlinkJitterMs: Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.
:type uplinkLossPercent: integer
:param uplinkLossPercent: Proportion of transmitted packets that fail to arrive from 0 to 100 percent.
:type downlinkLossPercent: integer
:param downlinkLossPercent: Proportion of received packets that fail to arrive from 0 to 100 percent.
:rtype: dict
:return: {
'networkProfile': {
'arn': 'string',
'name': 'string',
'description': 'string',
'type': 'CURATED'|'PRIVATE',
'uplinkBandwidthBits': 123,
'downlinkBandwidthBits': 123,
'uplinkDelayMs': 123,
'downlinkDelayMs': 123,
'uplinkJitterMs': 123,
'downlinkJitterMs': 123,
'uplinkLossPercent': 123,
'downlinkLossPercent': 123
}
}
"""
pass
def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):
"""
Modifies the specified project name, given the project ARN and a new name.
See also: AWS API Documentation
Examples
The following example updates the specified project with a new name.
Expected Output:
:example: response = client.update_project(
arn='string',
name='string',
defaultJobTimeoutMinutes=123
)
:type arn: string
:param arn: [REQUIRED]
The Amazon Resource Name (ARN) of the project whose name you wish to update.
:type name: string
:param name: A string representing the new name of the project that you are updating.
:type defaultJobTimeoutMinutes: integer
:param defaultJobTimeoutMinutes: The number of minutes a test run in the project will execute before it times out.
:rtype: dict
:return: {
'project': {
'arn': 'string',
'name': 'string',
'defaultJobTimeoutMinutes': 123,
'created': datetime(2015, 1, 1)
}
}
"""
pass
| 35.788278
| 510
| 0.565828
|
31183521bb99403915e6e554582d84246561310d
| 2,447
|
py
|
Python
|
doc/source/conf.py
|
kumulustech/browbeat
|
deb3bfc26481ff0bc3ac2633dbf38a26ef9aa6fc
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
kumulustech/browbeat
|
deb3bfc26481ff0bc3ac2633dbf38a26ef9aa6fc
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
kumulustech/browbeat
|
deb3bfc26481ff0bc3ac2633dbf38a26ef9aa6fc
|
[
"Apache-2.0"
] | 1
|
2022-01-19T14:00:08.000Z
|
2022-01-19T14:00:08.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'browbeat'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| 33.067568
| 79
| 0.695137
|
1fff9e15128d9eb23e99b4fcc48ced167499afc9
| 2,097
|
py
|
Python
|
test/functional/rpc_estimatefee.py
|
azsxcv11060/bitcoin
|
086dae9878680608e189b9aac2a605254d3b4395
|
[
"MIT"
] | 13
|
2019-03-21T03:55:52.000Z
|
2022-01-06T17:49:16.000Z
|
test/functional/rpc_estimatefee.py
|
azsxcv11060/bitcoin
|
086dae9878680608e189b9aac2a605254d3b4395
|
[
"MIT"
] | 18
|
2020-10-31T01:04:18.000Z
|
2020-11-03T19:25:27.000Z
|
test/functional/rpc_estimatefee.py
|
azsxcv11060/bitcoin
|
086dae9878680608e189b9aac2a605254d3b4395
|
[
"MIT"
] | 5
|
2019-12-17T23:52:00.000Z
|
2021-06-13T20:39:56.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the estimatefee RPCs.
Test the following RPCs:
- estimatesmartfee
- estimaterawfee
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def run_test(self):
# missing required params
assert_raises_rpc_error(-1, "estimatesmartfee", self.nodes[0].estimatesmartfee)
assert_raises_rpc_error(-1, "estimaterawfee", self.nodes[0].estimaterawfee)
# wrong type for conf_target
assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimatesmartfee, 'foo')
assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimaterawfee, 'foo')
# wrong type for estimatesmartfee(estimate_mode)
assert_raises_rpc_error(-3, "Expected type string, got number", self.nodes[0].estimatesmartfee, 1, 1)
assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", self.nodes[0].estimatesmartfee, 1, 'foo')
# wrong type for estimaterawfee(threshold)
assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimaterawfee, 1, 'foo')
# extra params
assert_raises_rpc_error(-1, "estimatesmartfee", self.nodes[0].estimatesmartfee, 1, 'ECONOMICAL', 1)
assert_raises_rpc_error(-1, "estimaterawfee", self.nodes[0].estimaterawfee, 1, 1, 1)
# valid calls
self.nodes[0].estimatesmartfee(1)
# self.nodes[0].estimatesmartfee(1, None)
self.nodes[0].estimatesmartfee(1, 'ECONOMICAL')
self.nodes[0].estimaterawfee(1)
self.nodes[0].estimaterawfee(1, None)
self.nodes[0].estimaterawfee(1, 1)
if __name__ == '__main__':
EstimateFeeTest().main()
| 40.326923
| 112
| 0.709585
|
f1af1dc27b7c003b938479501b6fa7b936a43b35
| 936
|
py
|
Python
|
src/python/example.py
|
barrettotte/IBMi-Book
|
911e7389ff80eae72e88cacc4a19dc2b7ede0126
|
[
"MIT"
] | 24
|
2019-08-19T23:31:52.000Z
|
2022-03-25T12:37:13.000Z
|
src/python/example.py
|
barrettotte/IBMi-Book
|
911e7389ff80eae72e88cacc4a19dc2b7ede0126
|
[
"MIT"
] | 1
|
2019-11-04T14:38:03.000Z
|
2019-11-11T14:06:18.000Z
|
src/python/example.py
|
barrettotte/IBMi-Book
|
911e7389ff80eae72e88cacc4a19dc2b7ede0126
|
[
"MIT"
] | 9
|
2019-12-09T14:35:21.000Z
|
2021-12-25T10:02:29.000Z
|
import os, json, getpass, pyodbc
# Get credentials
with open(os.path.abspath('config.json'), 'r') as f:
config = json.load(f)
host = config['host'] if 'host' in config else input("Enter host: ")
user = config['user'] if 'user' in config else input("Enter user: ")
pwd = getpass.getpass('Enter password: ')
# Init ODBC connection and cursor
conn = pyodbc.connect(driver='{IBM i Access ODBC Driver}', system=host, uid=user, pwd=pwd)
csr = conn.cursor()
try:
# Execute SQL string
csr.execute(' '.join([
"SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_PARTITION, SOURCE_TYPE",
"FROM QSYS2.SYSPARTITIONSTAT WHERE TABLE_SCHEMA = 'BOLIB'",
"ORDER BY TABLE_PARTITION"
]))
# Output result set
for row in csr:
print(row)
except Exception as e:
print('Error occurred with DB2 query\n ' + str(e))
finally:
# Close cursor and ODBC connection
csr.close()
conn.close()
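# A minimal config.json this script could read is sketched below; only the
# optional 'host' and 'user' keys referenced above are assumed, and the values
# are placeholders rather than real credentials:
# {
#     "host": "myibmi.example.com",
#     "user": "MYUSER"
# }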
| 31.2
| 90
| 0.653846
|
1a20018810aca71e231531dd6b4c27f07d98ddd0
| 289
|
py
|
Python
|
gadget/reboot.py
|
vaginessa/RaspberryPiZero_HID_MultiTool
|
c6227c7263cb1321a5655f938462392eb014a352
|
[
"Apache-2.0"
] | 54
|
2017-01-06T21:43:40.000Z
|
2022-02-14T02:57:57.000Z
|
gadget/reboot.py
|
vaginessa/RaspberryPiZero_HID_MultiTool
|
c6227c7263cb1321a5655f938462392eb014a352
|
[
"Apache-2.0"
] | null | null | null |
gadget/reboot.py
|
vaginessa/RaspberryPiZero_HID_MultiTool
|
c6227c7263cb1321a5655f938462392eb014a352
|
[
"Apache-2.0"
] | 13
|
2017-01-31T23:35:21.000Z
|
2021-12-22T12:48:59.000Z
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import os
gpio_pin_number=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_pin_number, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try:
GPIO.wait_for_edge(gpio_pin_number, GPIO.FALLING)
os.system("sudo shutdown -h now")
except:
pass
GPIO.cleanup()
| 18.0625
| 62
| 0.750865
|
3846012a8288d097b318f7c4f1e2572ac5b6d32e
| 367
|
py
|
Python
|
pyFTracks/ressources/__init__.py
|
underworldcode/pyFTracks
|
6050a4327616ebca7ab932b609b25c7c4e6a62f8
|
[
"MIT"
] | 4
|
2020-11-02T03:54:52.000Z
|
2022-03-04T11:48:26.000Z
|
pyFTracks/ressources/__init__.py
|
rbeucher/pyFTracks
|
6050a4327616ebca7ab932b609b25c7c4e6a62f8
|
[
"MIT"
] | 12
|
2020-03-05T04:04:46.000Z
|
2020-03-05T23:27:57.000Z
|
pyFTracks/ressources/__init__.py
|
ryanstoner1/pyFTracks
|
6050a4327616ebca7ab932b609b25c7c4e6a62f8
|
[
"MIT"
] | 2
|
2020-12-29T01:59:07.000Z
|
2021-10-15T11:22:57.000Z
|
import pandas as pd
from pathlib import Path
#Miller1995 = pd.read_hdf((Path(__file__).parent / "Miller1995.h5"), "data")
#Gleadow = pd.read_hdf((Path(__file__).parent / "Gleadow.h5"), "data")
from pyFTracks import Sample
Miller = Sample().read_from_hdf5(Path(__file__).parent / "Miller.h5")
Gleadow = Sample().read_from_hdf5(Path(__file__).parent / "Gleadow.h5")
| 33.363636
| 76
| 0.735695
|
9b128afd12ce9834be20a300379f8918f86a1510
| 1,624
|
py
|
Python
|
utils/ds_select_random_pc.py
|
Weafre/VoxelDNN_v2
|
9fa14eeb5a72ac982f020fa983e2eb9f8a09775d
|
[
"MIT"
] | 5
|
2021-08-09T08:45:24.000Z
|
2022-03-27T02:41:03.000Z
|
utils/ds_select_random_pc.py
|
Weafre/VoxelDNN_v2
|
9fa14eeb5a72ac982f020fa983e2eb9f8a09775d
|
[
"MIT"
] | 1
|
2021-09-14T09:54:29.000Z
|
2021-09-14T09:54:29.000Z
|
utils/ds_select_random_pc.py
|
Weafre/VoxelDNN_v2
|
9fa14eeb5a72ac982f020fa983e2eb9f8a09775d
|
[
"MIT"
] | 1
|
2021-08-13T09:43:48.000Z
|
2021-08-13T09:43:48.000Z
|
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import os
import argparse
import shutil
from os import makedirs
from glob import glob
from tqdm import tqdm
import random
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        prog='ds_select_random_pc.py',
        description='Randomly selects files from a source folder and symlinks them into a destination folder',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('source', help='Source directory')
parser.add_argument('dest', help='Destination directory')
    parser.add_argument('n', help='Number of files to randomly select.', type=int)
args = parser.parse_args()
assert os.path.exists(args.source), f'{args.source} does not exist'
assert args.n > 0
paths = glob(os.path.join(args.source, '**', f'*'), recursive=True)
paths = [x for x in paths if os.path.isfile(x)]
files = [x[len(args.source) + 1:] for x in paths]
files_len = len(files)
assert files_len > 0
logger.info(f'Found {files_len} models in {args.source}')
files_with_paths = random.sample(list(zip(files, paths)), args.n)
for file, path in tqdm(files_with_paths):
target_path = os.path.join(args.dest, file)
target_folder, _ = os.path.split(target_path)
makedirs(target_folder, exist_ok=True)
# shutil.copyfile(path, target_path)
os.symlink(path, target_path)
    logger.info(f'Linked {args.n} of {files_len} models to {args.dest}')
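    # Hypothetical invocation (paths and the count are placeholders, not taken
    # from the repository):
    #   python ds_select_random_pc.py ./all_point_clouds ./random_subset 100
    # This symlinks 100 randomly chosen files from ./all_point_clouds into
    # ./random_subset, preserving their relative sub-directory layout.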
| 31.843137
| 90
| 0.682266
|
06d0f289cb914aa6b9f89eb9427f99ae4875d117
| 8,997
|
py
|
Python
|
NLP_Flask/app/nlp/preprocess/vector_models.py
|
Gxy-2001/NLPPlat
|
f339c1a7452b979b413919cf4bc128bf45af52ef
|
[
"MIT"
] | null | null | null |
NLP_Flask/app/nlp/preprocess/vector_models.py
|
Gxy-2001/NLPPlat
|
f339c1a7452b979b413919cf4bc128bf45af52ef
|
[
"MIT"
] | null | null | null |
NLP_Flask/app/nlp/preprocess/vector_models.py
|
Gxy-2001/NLPPlat
|
f339c1a7452b979b413919cf4bc128bf45af52ef
|
[
"MIT"
] | null | null | null |
from gensim.models import word2vec as wc
from gensim.models import doc2vec as dc
from manage import app
from app.utils.file_utils import getFileURL
def Word2vec(data, params, type):
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append(vector['text1'])
if 'text2' in type:
sentences.append(vector['text2'])
if (params['max_vocab_size'] == 'None'):
params['max_vocab_size'] = None
else:
params['max_vocab_size'] = int(params['max_vocab_size'])
if (params['trim_rule'] == 'None'):
params['trim_rule'] = None
else:
params['trim_rule'] = int(params['trim_rule'])
model = wc.Word2Vec(sentences, size=int(params['size']), alpha=int(params['alpha']), window=int(params['window']),
min_count=int(params['min_count']), max_vocab_size=params['max_vocab_size'],
sample=int(params['sample']), seed=int(params['seed']), workers=int(params['workers']),
min_alpha=int(params['min_alpha']), sg=int(params['sg']), hs=int(params['hs']),
negative=int(params['negative']), cbow_mean=int(params['cbow_mean']),
hashfxn=hash, iter=int(params['iter']),
trim_rule=params['trim_rule'], sorted_vocab=int(params['sorted_vocab']),
batch_words=int(params['batch_words']))
modelURL = getFileURL('word2vec.txt', app)
model.wv.save_word2vec_format(modelURL, binary=False)
data['embedding'] = modelURL
return data
from pyspark.ml.feature import Word2Vec
from pyspark.sql import SparkSession
def Word2vecSpark(data, params, type):
spark = SparkSession.builder.appName("word2vec").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
if (params['max_vocab_size'] == 'None'):
params['max_vocab_size'] = None
else:
params['max_vocab_size'] = int(params['max_vocab_size'])
word2Vec = Word2Vec(vectorSize=int(params['size']), minCount=int(params['min_count']),
maxIter=int(params['iter']), seed=int(params['seed']),
windowSize=int(params['window']), maxSentenceLength=params['max_vocab_size'])
model = word2Vec.fit(df_document)
df_vector = model.transform(df_document)
embedding = []
for row in df_vector.collect():
embedding.append(row[1].array)
    data['embedding'] = embedding
    return data
def Doc2vec(data, params, type):
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append(vector['text1'])
if 'text2' in type:
sentences.append(vector['text2'])
if (params['max_vocab_size'] == 'None'):
params['max_vocab_size'] = None
else:
params['max_vocab_size'] = int(params['max_vocab_size'])
document = []
for i, sentence in enumerate(sentences):
document.append(dc.TaggedDocument(sentence, [i]))
model = dc.Doc2Vec(document, size=int(params['size']), alpha=int(params['alpha']), window=int(params['window']),
min_count=int(params['min_count']), max_vocab_size=params['max_vocab_size'],
sample=int(params['sample']), seed=int(params['seed']), workers=int(params['workers']))
modelURL = getFileURL('doc2vec.txt', app)
model.wv.save_word2vec_format(modelURL, binary=False)
data['embedding'] = modelURL
return data
from sklearn.feature_extraction.text import TfidfVectorizer
def myTFIDF(data, params, type):
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append(vector['text1'])
if 'text2' in type:
sentences.append(vector['text2'])
tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
tv_fit = tv.fit_transform(sentences)
data['embedding'] = tv_fit.toarray()
return data
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.feature import CountVectorizerModel
from pyspark.ml.feature import IDF
from pyspark.ml.feature import IDFModel
def myTFIDFSpark(data, params, type):
spark = SparkSession.builder.appName("word2vec").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
    cv = CountVectorizer(inputCol="text", outputCol="countFeatures", vocabSize=200 * 10000, minDF=1.0)
    # Train the term-frequency (CountVectorizer) model
cv_model = cv.fit(df_document)
cv_model.write().overwrite().save("app/models/MLmodel/CV.model")
cv_model = CountVectorizerModel.load("app/models/MLmodel/CV.model")
    # Obtain the term-frequency vector result
cv_result = cv_model.transform(df_document)
idf = IDF(inputCol="countFeatures", outputCol="idfFeatures")
idf_model = idf.fit(cv_result)
idf_model.write().overwrite().save("app/models/MLmodel/IDF.model")
idf_model = IDFModel.load("app/models/MLmodel/IDF.model")
tfidf_result = idf_model.transform(cv_result)
    data['embedding'] = [row["idfFeatures"].toArray() for row in tfidf_result.collect()]
return data
from pyspark.ml.feature import MaxAbsScaler
def MaxAbs(data, params, type):
spark = SparkSession.builder.appName("MaxMin").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
scaler = MaxAbsScaler(inputCol="feature", outputCol="scaledFeatures")
scalerModel = scaler.fit(df_document)
df_rescaled = scalerModel.transform(df_document)
data['vectors'] = df_rescaled.data
return data
from pyspark.ml.feature import MinMaxScaler
def MinMax(data, params, type):
spark = SparkSession.builder.appName("MaxMin").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
scaler = MinMaxScaler(inputCol="feature", outputCol="scaledFeatures")
scalerModel = scaler.fit(df_document)
df_rescaled = scalerModel.transform(df_document)
data['vectors'] = df_rescaled.data
return data
from sklearn.decomposition import PCA
def PCASK(data, params, type):
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
pca = PCA(0.5)
pca.fit(sentences)
data['vectors'] = pca.transform(sentences)
return data
from sklearn.decomposition import LatentDirichletAllocation
def LDASK(data, params, type):
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
lda = LatentDirichletAllocation(0.5)
lda.fit(sentences)
data['vectors'] = lda.transform(sentences)
return data
from pyspark.ml.feature import PCA
def PCASpark(data, params, type):
spark = SparkSession.builder.appName("PCA").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
model = pca.fit(df_document)
dfresult = model.transform(df_document).select("pcaFeatures")
data['vectors'] = dfresult
return data
def LDASpark(data, params, type):
spark = SparkSession.builder.appName("PCA").config("master", "local[*]").enableHiveSupport().getOrCreate()
sentences = []
for vector in data['vectors']:
if 'text1' in type:
sentences.append((vector['text1'],))
if 'text2' in type:
sentences.append((vector['text2'],))
df_document = spark.createDataFrame(sentences, ["text"])
pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
model = pca.fit(df_document)
dfresult = model.transform(df_document).select("pcaFeatures")
data['vectors'] = dfresult
return data
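# Minimal usage sketch for the Word2vec helper above. The 'data' and 'params'
# shapes are inferred from the function bodies in this module and the values
# are placeholders, not taken from the project:
# data = {'vectors': [{'text1': ['hello', 'world']}, {'text1': ['foo', 'bar']}]}
# params = {'size': 100, 'alpha': 1, 'window': 5, 'min_count': 1,
#           'max_vocab_size': 'None', 'sample': 0, 'seed': 1, 'workers': 4,
#           'min_alpha': 0, 'sg': 0, 'hs': 0, 'negative': 5, 'cbow_mean': 1,
#           'iter': 5, 'trim_rule': 'None', 'sorted_vocab': 1, 'batch_words': 10000}
# result = Word2vec(data, params, type=['text1'])
# result['embedding'] then holds the path of the saved word2vec.txt file.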
| 37.177686
| 118
| 0.644437
|
4a62edf8e2577e075a6af328b336d3e5a0ff5afd
| 937
|
py
|
Python
|
extensions/.stubs/clrclasses/System/Text/RegularExpressions/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | 1
|
2020-03-25T03:27:24.000Z
|
2020-03-25T03:27:24.000Z
|
extensions/.stubs/clrclasses/System/Text/RegularExpressions/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
extensions/.stubs/clrclasses/System/Text/RegularExpressions/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
from __clrclasses__.System.Text.RegularExpressions import Capture
from __clrclasses__.System.Text.RegularExpressions import CaptureCollection
from __clrclasses__.System.Text.RegularExpressions import Group
from __clrclasses__.System.Text.RegularExpressions import GroupCollection
from __clrclasses__.System.Text.RegularExpressions import Match
from __clrclasses__.System.Text.RegularExpressions import MatchCollection
from __clrclasses__.System.Text.RegularExpressions import MatchEvaluator
from __clrclasses__.System.Text.RegularExpressions import Regex
from __clrclasses__.System.Text.RegularExpressions import RegexCompilationInfo
from __clrclasses__.System.Text.RegularExpressions import RegexMatchTimeoutException
from __clrclasses__.System.Text.RegularExpressions import RegexOptions
from __clrclasses__.System.Text.RegularExpressions import RegexRunner
from __clrclasses__.System.Text.RegularExpressions import RegexRunnerFactory
| 66.928571
| 84
| 0.902882
|
fe145e55b8857c41a49c43c60bb3f12e4fb3179f
| 20,545
|
py
|
Python
|
flux_mito/model_294.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_294.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_294.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 35000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.115741
| 798
| 0.804089
|
81a2463c34012bb603afc99993d37e17a1ea00cb
| 1,123
|
py
|
Python
|
player_amp/api_functions/Logout.py
|
R0b95/player_amp
|
f5683ceb3a014bf474bbadc37352c3869abfccf9
|
[
"MIT"
] | null | null | null |
player_amp/api_functions/Logout.py
|
R0b95/player_amp
|
f5683ceb3a014bf474bbadc37352c3869abfccf9
|
[
"MIT"
] | null | null | null |
player_amp/api_functions/Logout.py
|
R0b95/player_amp
|
f5683ceb3a014bf474bbadc37352c3869abfccf9
|
[
"MIT"
] | null | null | null |
# player_amp
### written in Python 3.8.1 by Strolch
import json
import requests
class Logout:
# logout mainpage
def main(server, port, header, sessionid):
mainendpoint = "/API/Core/Logout"
data = {'SESSIONID': sessionid}
Rückgabe = requests.post(url=('http://' + server + ":" + port + mainendpoint), data=json.dumps(data),
headers=header)
return Rückgabe
# logout game server instances
def instances(server, port, header, instanceids, sessionids):
otherendpoint_part1 = "/API/ADSModule/Servers/"
otherendpoint_part2 = "/API/Core/Logout"
i = 1
Rückgabe = []
while i < len(instanceids):
            # a game server logout loop :D
data = {'SESSIONID': sessionids[i]}
post = requests.post(
url=('http://' + server + ":" + port + otherendpoint_part1 + instanceids[i] + otherendpoint_part2),
data=json.dumps(data), headers=header)
Rückgabe.append(post)
i += 1
return Rückgabe
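# Hedged usage sketch (host, port, session id and the header layout are
# assumptions for illustration, not values from this project):
# header = {'Accept': 'text/javascript', 'Content-Type': 'application/json'}
# response = Logout.main('127.0.0.1', '8080', header, 'abc123sessionid')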
| 31.194444
| 116
| 0.557435
|
7226166cf6652a50e308926e996d0052c35bd170
| 655
|
py
|
Python
|
scripts/dbsessions2trash.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | 6
|
2018-01-25T01:07:55.000Z
|
2019-04-26T23:58:29.000Z
|
scripts/dbsessions2trash.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/dbsessions2trash.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | 2
|
2018-02-03T02:55:56.000Z
|
2018-02-06T19:55:10.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from time import mktime
from time import sleep
from time import time
DB_URI = 'sqlite://sessions.sqlite'
EXPIRATION_MINUTES = 60
SLEEP_MINUTES = 5
while 1: # Infinite loop
now = time() # get current Unix timestamp
for row in db().select(db.web2py_session_welcome.ALL):
t = row.modified_datetime
# Convert to a Unix timestamp
t = mktime(t.timetuple())+1e-6*t.microsecond
if now - t > EXPIRATION_MINUTES * 60:
del db.web2py_session_welcome[row.id]
db.commit() # Write changes to database
sleep(SLEEP_MINUTES * 60)
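# Worked example of the expiry arithmetic above: with EXPIRATION_MINUTES = 60,
# a session whose modified_datetime converts to t = 1,600,000,000 is removed on
# the first pass where now exceeds 1,600,003,600, i.e. more than 3600 seconds
# (60 minutes) after it was last modified.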
| 24.259259
| 58
| 0.676336
|
fe8b5da814fdd220b8c1c95bb0b19fa1d6da446b
| 3,732
|
py
|
Python
|
tensorflow_probability/python/math/minimize_test.py
|
nbro/probability
|
07a6378155f0ed720b5aaccf5387e3f9a432bd10
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/math/minimize_test.py
|
nbro/probability
|
07a6378155f0ed720b5aaccf5387e3f9a432bd10
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/math/minimize_test.py
|
nbro/probability
|
07a6378155f0ed720b5aaccf5387e3f9a432bd10
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for minimization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class MinimizeTests(test_util.TestCase):
def test_custom_trace_fn(self):
init_x = np.array([0., 0.]).astype(np.float32)
target_x = np.array([3., 4.]).astype(np.float32)
x = tf.Variable(init_x)
loss_fn = lambda: tf.reduce_sum(input_tensor=(x - target_x)**2)
# The trace_fn should determine the structure and values of the results.
def trace_fn(loss, grads, variables):
del grads
del variables
return {'loss': loss, 'x': x, 'sqdiff': (x - target_x)**2}
results = tfp.math.minimize(loss_fn, num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
trace_fn=trace_fn)
self.evaluate(tf1.global_variables_initializer())
results_ = self.evaluate(results)
self.assertAllClose(results_['x'][0], init_x, atol=0.5)
self.assertAllClose(results_['x'][-1], target_x, atol=0.2)
self.assertAllClose(results_['sqdiff'][-1], [0., 0.], atol=0.1)
def test_respects_trainable_variables(self):
# Variables not included in `trainable_variables` should stay fixed.
x = tf.Variable(5.)
y = tf.Variable(2.)
loss_fn = lambda: tf.reduce_sum(input_tensor=(x - y)**2)
loss = tfp.math.minimize(loss_fn, num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
trainable_variables=[x])
with tf.control_dependencies([loss]):
final_x = tf.identity(x)
final_y = tf.identity(y)
self.evaluate(tf1.global_variables_initializer())
final_x_, final_y_ = self.evaluate((final_x, final_y))
self.assertAllClose(final_x_, 2, atol=0.1)
self.assertEqual(final_y_, 2.) # `y` was untrained, so should be unchanged.
def test_works_when_results_have_dynamic_shape(self):
# Create a variable (and thus loss) with dynamically-shaped result.
x = tf.Variable(initial_value=tf1.placeholder_with_default(
[5., 3.], shape=None))
num_steps = 10
losses, grads = tfp.math.minimize(
loss_fn=lambda: (x - 2.)**2,
num_steps=num_steps,
# TODO(b/137299119) Replace with TF2 optimizer.
optimizer=tf1.train.AdamOptimizer(0.1),
trace_fn=lambda loss, grads, vars: (loss, grads),
trainable_variables=[x])
with tf.control_dependencies([losses]):
final_x = tf.identity(x)
self.evaluate(tf1.global_variables_initializer())
final_x_, losses_, grads_ = self.evaluate((final_x, losses, grads))
self.assertAllEqual(final_x_.shape, [2])
self.assertAllEqual(losses_.shape, [num_steps, 2])
self.assertAllEqual(grads_[0].shape, [num_steps, 2])
if __name__ == '__main__':
tf.test.main()
| 36.950495
| 80
| 0.678189
|
bf57294052f29e4e6cee21d0eae064d864e8d149
| 7,967
|
py
|
Python
|
data_process/crys_data.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
data_process/crys_data.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
data_process/crys_data.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/5/25 16:35
import numpy as np
from numpy import nan
import torch
import ase
class ElementData:
# given formula return atomic number, e.g., element_dict["H"] = 1
element_dict = ase.data.atomic_numbers
# given atomic number, return covalent radii
covalent_radii = ase.data.covalent_radii
periodic_num_tensor = \
torch.tensor([nan,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7], dtype=torch.float32)
group_number_lst = \
torch.tensor([
nan,
1, 2, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2,
3, 4, 5, 6, 2, 3, nan, nan, 1, 2,
3, 4, 5, 6, 7, nan, nan, nan, 3, nan,
nan, nan, nan, 3, nan, nan, 1, 2, 3, 4,
5, 6, 7, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, 3, nan, nan, nan, 2,
3, 4, 5, 6, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan], dtype=torch.float32)
electro_negativity_lst = \
[nan,
2.20, nan, 0.98, 1.57, 2.04, 2.55, 3.04, 3.44, 3.98, nan,
0.93, 1.31, 1.61, 1.98, 2.19, 2.58, 3.16, nan, 0.82, 0.95,
1.36, 1.54, 1.63, 1.66, 1.55, 1.83, 1.88, 1.92, 1.90, 1.65,
1.81, 2.01, 2.18, 2.55, 2.96, 2.00, 0.82, 0.95, 1.22, 1.33,
1.59, 2.16, 1.91, 2.20, 2.28, 2.20, 1.93, 1.69, 1.78, 1.96,
2.05, 2.12, 2.66, 2.60, 0.79, 0.89, 1.11, 1.12, 1.13, 1.14,
1.13, 1.17, 1.19, 1.21, 1.13, 1.22, 1.23, 1.24, 1.25, 1.26,
1.27, 1.32, 1.51, 2.36, 1.93, 2.18, 2.20, 2.28, 2.54, 2.00,
1.62, 2.33, 2.02, 1.99, 2.22, 2.43, 0.71, 0.92, 1.09, 1.32,
1.54, 1.38, 1.36, 1.28, 1.13, 1.28, 1.35, 1.29, 1.31, 1.34,
1.33, 1.36, 1.34, nan, nan, nan, nan, nan]
# 83, 84, Bi, Po unknown
ion_radii_lst = \
torch.tensor([nan,
nan, nan, 0.59, 0.27, 0.11, 0.15, 1.46, 1.38, 1.31, nan,
nan, 0.57, 0.39, 0.26, 0.17, 1.84, 1.81, nan, nan, nan,
0.6, nan, nan, nan, 0.66, 0.49, nan, nan, 0.6, 0.6,
0.47, 0.39, 0.335, 1.98, 1.96, nan, nan, nan, 0.76, nan,
nan, nan, nan, 0.54, nan, nan, 1.0, 0.78, 0.62, 0.55,
0.76, 2.21, 2.2, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, 0.39, nan, nan, nan, 0.96,
0.75, 0.65, 0.5, 0.5, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan], dtype=torch.float32)
    # valence electron counts (np.nan where not well defined)
valence_electron = [np.nan,
1, 2, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1, 2,
3, 4, 5, 6, 7, 8, 1, 2, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1, 2, 3, 4,
5, 6, 7, 8, 1, 2, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1, 2,
3, 4, 5, 6, 7, 8, 1, 2, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
]
    # charge numbers: 0 where the valence electron count is definite, a list of possible charges where it is not
charge_num = [1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
[3], [2, 3, 4], [2, 3, 4, 5], [2, 3, 4, 5, 6], [2, 3, 4, 5, 6, 7], [2, 3, 4, 6], [2, 3, 4], [2, 3, 4], 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, [3], [4],
[3, 4, 5], [3, 4, 5, 6], [4, 5, 7], [3, 4, 5, 7, 8], [3, 4, 5], [1, 2, 3, 4], 1, 2, 1, 1,
0, 0, 0, 0, 0, 0, [3], [3, 4], [3, 4], [2, 3],
[3], [2, 3], [2, 3], [3], [3, 4], [2, 3], [3], [3], [2, 3], [2, 3],
[3], [4], [3, 4, 5], [4, 5, 6], [4, 5, 6, 7], [4, 5, 6, 7, 8], [3, 4, 5], [2, 4, 5], 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, [3], [4],
[3, 4, 5], [3, 4, 5, 6], [2, 3, 4, 5, 6, 7], [3, 4, 5, 6], [2, 3, 4], [3, 4], [3, 4], [3, 4], np.nan, np.nan]
    # anion: -1, cation: +1, undetermined: np.nan (group-IV elements and noble gases)
cation_anion_lst = [np.nan,
1, np.nan, 1, 1, 1, np.nan, -1, -1, -1, np.nan,
1, 1, 1, np.nan, -1, -1, -1, np.nan, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, np.nan, -1, -1, -1, np.nan, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, np.nan,
-1, -1, -1, np.nan, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, np.nan, -1, -1, -1, np.nan, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
if __name__ == "__main__":
xushu_lst = [3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 21, 25, 26, 29, 30, 31, 32, 33, 34, 35, 39, 44, 47, 48,
49, 50, 51, 52, 53, 76, 80, 81, 82, 83, 84]
element_lst = ['Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Sc', 'Mn', 'Fe', 'Cu', 'Zn',
'Ga', 'Ge', 'As', 'Se', 'Br', 'Y', 'Ru', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Os', 'Hg', 'Tl',
'Pb', 'Bi', 'Po']
zhouqi_lst = [2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6,
6, 6, 6, 6]
zhuxushu_lst = [1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 3, 2, 3, 1, 2, 3, 4, 5, 6, 7, 3, 3, 1, 2, 3, 4, 5, 6, 7, 3,
2, 3, 4, 5, 6]
dianfuxing_lst = [0.98, 1.57, 2.04, 2.55, 3.04, 3.44, 3.98, 1.31, 1.61, 1.9, 2.19, 2.58, 3.16, 1.36, 1.55, 1.83,
1.9, 1.65, 1.81, 2.01, 2.18, 2.55, 2.96, 1.22, 2.2, 1.93, 1.69, 1.78, 1.96, 2.05, 2.1, 2.66, 2.2,
2.0, 1.62, 2.33, 2.02, 2.0]
    # unit: angstrom (Å)
radii_lst = [0.59, 0.27, 0.11, 0.15, 1.46, 1.38, 1.31, 0.57, 0.39, 0.26, 0.17, 1.84, 1.81, 0.6, 0.66, 0.49, 0.6, 0.6, 0.47, 0.39, 0.335, 1.98, 1.96, 0.76, 0.54, 1.0, 0.78, 0.62, 0.55, 0.76, 2.21, 2.2, 0.39, 0.96, 0.75, 0.65, nan, nan]
periodic_number_lst = [np.nan for i in range(109)]
group_number_lst = [np.nan for i in range(109)]
electro_negativity_lst = [np.nan for i in range(109)]
ion_radii_lst = [np.nan for i in range(109)]
for index, xushu in enumerate(xushu_lst):
periodic_number_lst[xushu] = zhouqi_lst[index]
group_number_lst[xushu] = zhuxushu_lst[index]
electro_negativity_lst[xushu] = dianfuxing_lst[index]
ion_radii_lst[xushu] = radii_lst[index]
element_dict = dict()
for i, element in enumerate(element_lst):
element_dict[element_lst[i]] = xushu_lst[i]
print(periodic_number_lst)
print(element_dict)
print(group_number_lst)
print(electro_negativity_lst)
print(ion_radii_lst)
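    # Small usage sketch of the ase.data lookups wrapped by ElementData above
    # (the 'Fe' example is illustrative): ElementData.element_dict['Fe'] gives 26,
    # and ElementData.covalent_radii[26] gives iron's covalent radius in angstroms.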
| 48.877301
| 238
| 0.404293
|
5f98b31f3f4b2f6cf16f875c6fc5bac4bb6358d4
| 3,950
|
py
|
Python
|
tensor2tensor/trax/rl/space_serializer_test.py
|
evalphobia/tensor2tensor
|
8a95e96f31d1beccb8efbb2290f6a271600eb3f3
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/trax/rl/space_serializer_test.py
|
evalphobia/tensor2tensor
|
8a95e96f31d1beccb8efbb2290f6a271600eb3f3
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/trax/rl/space_serializer_test.py
|
evalphobia/tensor2tensor
|
8a95e96f31d1beccb8efbb2290f6a271600eb3f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.trax.rl.space_serializer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import gym
import numpy as np
from tensor2tensor.trax.rl import space_serializer
from tensorflow import test
class BoxSpaceSerializerTest(test.TestCase):
def _make_space_and_serializer(self, low=-10, high=10, shape=(2,)):
# Enough precision to represent float32s accurately.
gin.bind_parameter("BoxSpaceSerializer.precision", 4)
space = gym.spaces.Box(low=low, high=high, shape=shape)
serializer = space_serializer.create(
space,
# Weird vocab_size to test that it doesn't only work with powers of 2.
vocab_size=257)
return (space, serializer)
def _sample_batch(self, space):
return np.reshape(space.sample(), (1,) + space.shape)
def test_representation_length(self):
(space, serializer) = self._make_space_and_serializer()
input_array = self._sample_batch(space)
representation = serializer.serialize(input_array)
self.assertEqual(
representation.shape, (1, serializer.representation_length))
def test_commutes(self):
(space, serializer) = self._make_space_and_serializer()
input_array = self._sample_batch(space)
representation = serializer.serialize(input_array)
output_array = serializer.deserialize(representation)
np.testing.assert_array_almost_equal(input_array, output_array)
def test_representation_changes(self):
(space, serializer) = self._make_space_and_serializer()
array1 = self._sample_batch(space)
array2 = -array1
(repr1, repr2) = tuple(map(serializer.serialize, (array1, array2)))
self.assertFalse(np.array_equal(repr1, repr2))
def test_bounds_space(self):
gin.bind_parameter("BoxSpaceSerializer.max_range", (-10.0, 10.0))
(_, serializer) = self._make_space_and_serializer(
# Too wide range to represent, need to clip.
low=-1e18, high=1e18,
shape=(1,))
input_array = np.array([[1.2345]])
representation = serializer.serialize(input_array)
output_array = serializer.deserialize(representation)
np.testing.assert_array_almost_equal(input_array, output_array)
class DiscreteSpaceSerializerTest(test.TestCase):
def setUp(self):
super(DiscreteSpaceSerializerTest, self).setUp()
self._space = gym.spaces.Discrete(n=2)
self._serializer = space_serializer.create(self._space, vocab_size=2)
def _sample_batch(self):
return np.reshape(self._space.sample(), (1,) + self._space.shape)
def test_representation_length(self):
input_array = self._sample_batch()
representation = self._serializer.serialize(input_array)
self.assertEqual(
representation.shape, (1, self._serializer.representation_length))
def test_commutes(self):
input_array = self._sample_batch()
representation = self._serializer.serialize(input_array)
output_array = self._serializer.deserialize(representation)
np.testing.assert_array_almost_equal(input_array, output_array)
def test_representation_changes(self):
array1 = self._sample_batch()
array2 = 1 - array1
(repr1, repr2) = tuple(map(self._serializer.serialize, (array1, array2)))
self.assertFalse(np.array_equal(repr1, repr2))
if __name__ == "__main__":
test.main()
| 36.238532
| 78
| 0.746582
|
8de8ed8120fe3536b6e68dd355ea11c910ed0714
| 1,282
|
py
|
Python
|
bubbleSort.py
|
binnev/what-can-be-computed
|
58d01ef09096bda2d0a1876575b992d47fcd0470
|
[
"CC-BY-4.0"
] | null | null | null |
bubbleSort.py
|
binnev/what-can-be-computed
|
58d01ef09096bda2d0a1876575b992d47fcd0470
|
[
"CC-BY-4.0"
] | null | null | null |
bubbleSort.py
|
binnev/what-can-be-computed
|
58d01ef09096bda2d0a1876575b992d47fcd0470
|
[
"CC-BY-4.0"
] | null | null | null |
# SISO program bubbleSort.py
# Sorts the words in an ASCII string into lexicographical
# order, using the well-known bubble sort algorithm.
# inString: Represents a list of words separated by whitespace.
# returns: A string consisting of the input words sorted into
# lexicographical order and separated by space characters.
# Example:
# >>> bubbleSort('cap bat apple')
# 'apple bat cap'
import utils
from utils import rf
def bubbleSort(inString):
words = inString.split()
while not isSorted(words):
for i in range(len(words) - 1):
if words[i + 1] < words[i]:
# swap elements i and i+1
words[i], words[i + 1] = words[i + 1], words[i]
return " ".join(words) # single string of words separated by spaces
def isSorted(words):
for i in range(len(words) - 1):
if words[i + 1] < words[i]:
return False
return True
def testBubbleSort():
testvals = [
("here is no water but only rock", "but here is no only rock water"),
("", ""),
("xxxx", "xxxx"),
("apple banana apple", "apple apple banana"),
]
for (inString, solution) in testvals:
val = bubbleSort(inString)
utils.tprint(inString, ":", val)
assert val == solution
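# Illustrative trace of bubbleSort('cap bat apple') through the loops above:
# pass 1: ['cap','bat','apple'] -> ['bat','cap','apple'] -> ['bat','apple','cap']
# pass 2: ['bat','apple','cap'] -> ['apple','bat','cap']  (isSorted now returns True)
# so the function returns 'apple bat cap'.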
| 27.276596
| 77
| 0.617005
|
d2876b52f41747466519fbc70fe5663e9b76171e
| 10,945
|
py
|
Python
|
find_orf.py
|
Lacona/python-translation-project
|
085da4d37f4a0b1817d2b0fb51c5345615b77e7d
|
[
"CC-BY-4.0"
] | null | null | null |
find_orf.py
|
Lacona/python-translation-project
|
085da4d37f4a0b1817d2b0fb51c5345615b77e7d
|
[
"CC-BY-4.0"
] | null | null | null |
find_orf.py
|
Lacona/python-translation-project
|
085da4d37f4a0b1817d2b0fb51c5345615b77e7d
|
[
"CC-BY-4.0"
] | null | null | null |
#! /usr/bin/env python3
import sys
import re
def vet_nucleotide_sequence(sequence):
"""
Return None if `sequence` is a valid RNA or DNA sequence, else raise exception.
Parameters
----------
sequence : str
A string representing a DNA or RNA sequence (upper or lower-case)
Returns
-------
None
Return nothing (None) if sequence is valid, otherwise raise an
exception.
Examples
--------
>>> vet_nucleotide_sequence('ACGTACGT') == None
True
>>> vet_nucleotide_sequence('not a valid sequence')
Traceback (most recent call last):
...
Exception: Invalid sequence: 'not a valid sequence'
Don't allow mixing of DNA and RNA!
>>> vet_nucleotide_sequence('AUTGC')
Traceback (most recent call last):
...
Exception: Invalid sequence: 'AUTGC'
Don't allow whitespace (or other characters) before, within, or after!
>>> vet_nucleotide_sequence(' ACGT ACGT ')
Traceback (most recent call last):
...
Exception: Invalid sequence: ' ACGT ACGT '
But, an empty string should be deemed valid
>>> vet_nucleotide_sequence('') == None
True
"""
##########################################################################
############################ EDIT CODE BELOW #############################
# `rna_pattern_str` and `dna_pattern_str` need to be regular expressions
# that will match any string of zero or more RNA and DNA bases,
# respectively (and only strings of zero or more RNA and DNA bases).
# Currently, `rna_pattern_str` and `dna_pattern_str` are strings of literal
# characters.
# These are valid regular expressions, but they will only match their
# respective strings exactly.
# Change `rna_pattern_str` and `dna_pattern_str` so that they will match
# any valid RNA and DNA sequence strings, respectively (and only strings of
# RNA and DNA bases).
# Read the docstring above for additional clues.
rna_pattern_str = r'^$|^[AUCG]+$'
dna_pattern_str = r'^$|^[ATCG]+$'
##########################################################################
rna_pattern = re.compile(rna_pattern_str, flags=re.IGNORECASE)
dna_pattern = re.compile(dna_pattern_str, flags=re.IGNORECASE)
if rna_pattern.match(sequence):
return
if dna_pattern.match(sequence):
return
else:
raise Exception("Invalid sequence: {0!r}".format(sequence))
def vet_codon(codon):
"""
Return None if `codon` is a valid RNA codon, else raise an exception.
Parameters
----------
codon : str
A string representing a codon (upper or lower-case)
Returns
-------
None
Return nothing (None) if codon is valid, otherwise raise an
exception.
Examples
--------
Valid codon
>>> vet_codon('AUG') == None
True
lower-case is also vaild
>>> vet_codon('aug') == None
True
DNA is not valid
>>> vet_codon('ATG')
Traceback (most recent call last):
...
Exception: Invalid codon: 'ATG'
A codon must be exactly 3 RNA bases long
>>> vet_codon('AUGG')
Traceback (most recent call last):
...
Exception: Invalid codon: 'AUGG'
"""
##########################################################################
############################ EDIT CODE BELOW #############################
# `codon_pattern_str` needs to be a regular expression that will match any
# codon (but only a string that is one codon).
# Currently, `codon_pattern_str` is only a string of literal characters.
# This is a valid regular expression, but it will only match 'AUG' exactly.
# Change `codon_pattern_str` so that it will match any valid codons, and
# only valid codons.
# Read the docstring above for additional clues.
codon_pattern_str = r'[AUGC]{3}$'
##########################################################################
codon_pattern = re.compile(codon_pattern_str, flags=re.IGNORECASE)
if codon_pattern.match(codon):
return
else:
raise Exception("Invalid codon: {0!r}".format(codon))
def find_first_orf(sequence,
start_codons = ['AUG'],
stop_codons = ['UAA', 'UAG', 'UGA']):
"""
Return the first open-reading frame in the DNA or RNA `sequence`.
An open-reading frame (ORF) is the part of an RNA sequence that is
translated into a peptide. It must begin with a start codon, followed by
zero or more codons (triplets of nucleotides), and end with a stop codon.
If there are no ORFs in the sequence, an empty string is returned.
Parameters
----------
sequence : str
A string representing a DNA or RNA sequence (upper or lower-case)
start_codons : list of strings
All possible start codons. Each codon must be a string of 3 RNA bases,
upper or lower-case.
stop_codons : list of strings
All possible stop codons. Each codon must be a string of 3 RNA bases,
upper or lower-case.
Returns
-------
str
An uppercase string of the first ORF found in the `sequence` that
starts with any one of the `start_codons` and ends with any one of the
`stop_codons`. If no ORF is found an empty string is returned.
Examples
--------
When the whole RNA sequence is an ORF:
>>> find_first_orf('AUGGUAUAA', ['AUG'], ['UAA'])
'AUGGUAUAA'
When the whole DNA sequence is an ORF:
>>> find_first_orf('ATGGTATAA', ['AUG'], ['UAA'])
'AUGGUAUAA'
When there is no ORF:
>>> find_first_orf('CUGGUAUAA', ['AUG'], ['UAA'])
''
    When there are bases before and after the ORF:
>>> find_first_orf('CCAUGGUAUAACC', ['AUG'], ['UAA'])
'AUGGUAUAA'
"""
# Make sure the sequence is valid
vet_nucleotide_sequence(sequence)
# Make sure the codons are valid
for codon in start_codons:
vet_codon(codon)
for codon in stop_codons:
vet_codon(codon)
# Get copies of everything in uppercase
seq = sequence.upper()
starts = [c.upper() for c in start_codons]
stops = [c.upper() for c in stop_codons]
# Make sure seq is RNA
seq = seq.replace('T', 'U')
##########################################################################
############################ EDIT CODE BELOW #############################
# `orf_pattern_str` needs to be a regular expression that will match an
# open reading frame within a string of RNA bases. At this point we know
# the string only contains uppercase A, C, G, and U.
# I recommend starting by hardcoding the standard start and stop codons
# (the ones listed as defaults for this function) into the regular
# expression. After you get that working, then try generalizing it to work
# for any start/stop codons.
# Currently, `orf_pattern_str` is only a string of literal characters. This
# is a valid regular expression, but it will only match 'AUGGUAUAA'
# exactly. Change `orf_pattern_str` so that it will match any open reading
# frame.
# Read the docstring above for additional clues.
    # Build the pattern from whatever start/stop codons were passed in, so the
    # search also works for non-default codons. Alternation groups are
    # non-capturing; the interior of the ORF must be whole codons (triplets).
    start_alternatives = "|".join(starts)
    stop_alternatives = "|".join(stops)
    orf_pattern_str = r'(?:{0})(?:[AUCG]{{3}})*(?:{1})'.format(
        start_alternatives, stop_alternatives)
##########################################################################
# Create the regular expression object
orf_pattern = re.compile(orf_pattern_str)
# Search the sequence
match_object = orf_pattern.search(seq)
if match_object:
return match_object.group()
return ''
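# A quick illustration of the search above with non-default codons. This is a
# sketch only: the sequence and codons below are made-up values, not part of
# the assignment's test data.
#
#   >>> find_first_orf('GGAAACCCUUUGG', ['AAA'], ['UUU'])
#   'AAACCCUUU'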
def parse_sequence_from_path(path):
# Try to open the path to read from it, and handle exceptions if they
# arrise
try:
file_stream = open(path, 'r')
except FileNotFoundError as e:
sys.stderr.write("Sorry, couldn't find path {}".format(path))
raise e
except IsADirectoryError as e:
sys.stderr.write("Sorry, path {} appears to be a directory".format(
path))
raise e
except:
sys.stderr.write("Sorry, something went wrong when trying to open {}".format(
path))
raise
# If we've reached here, the file is open and ready to read
sequence = ''
# A for loop to visit each line in the file
for line in file_stream:
# Strip whitespace from the line and concatenate it to the end of the
# sequence
sequence += line.strip()
return sequence
def main():
import argparse
# Create a command-line parser object
parser = argparse.ArgumentParser()
default_start_codons = ['AUG']
default_stop_codons = ['UAA', 'UAG', 'UGA']
# Tell the parser what command-line arguments this script can receive
parser.add_argument('sequence',
metavar = 'SEQUENCE',
type = str,
help = ('The sequence to search for an open-reading frame. '
'If the path flag (\'-p\'/\'--path\') is specified, '
'then this should be a path to a file containing the '
'sequence to be searched.'))
parser.add_argument('-p', '--path',
action = 'store_true',
help = ('The sequence argument should be treated as a path to a '
'containing the sequence to be searched.'))
parser.add_argument('-s', '--start-codon',
type = str,
action = 'append', # append each argument to a list
default = None,
help = ('A start codon. This option can be used multiple times '
'if there are multiple start codons. '
'Default: {0}.'.format(" ".join(default_start_codons))))
parser.add_argument('-x', '--stop-codon',
type = str,
action = 'append', # append each argument to a list
default = None,
help = ('A stop codon. This option can be used multiple times '
'if there are multiple stop codons. '
'Default: {0}.'.format(" ".join(default_stop_codons))))
# Parse the command-line arguments into a 'dict'-like container
args = parser.parse_args()
# Check to see if the path option was set to True by the caller. If so, parse
# the sequence from the path
if args.path:
sequence = parse_sequence_from_path(args.sequence)
else:
sequence = args.sequence
# Check to see if start/stop codons were provided by the caller. If not,
# use the defaults.
if not args.start_codon:
args.start_codon = default_start_codons
if not args.stop_codon:
args.stop_codon = default_stop_codons
orf = find_first_orf(sequence = sequence,
start_codons = args.start_codon,
stop_codons = args.stop_codon)
sys.stdout.write('{}\n'.format(orf))
if __name__ == '__main__':
main()
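# Example invocations (illustrative; 'seq.txt' is a hypothetical file holding
# the sequence to search):
#
#   python find_orf.py CCAUGGUAUAACC
#   python find_orf.py -p seq.txt -s AUG -x UAA -x UAG -x UGA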
| 34.968051
| 85
| 0.59534
|
12f77716d99eb40b78741bfd4a4e1c57c70cb03f
| 2,283
|
py
|
Python
|
tests/riscv/paging/paging_memory_attributes_basic_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/paging/paging_memory_attributes_basic_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/paging/paging_memory_attributes_basic_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
import MemoryTraits
import RandomUtils
# This test verifies that memory attributes can be set on specific regions of memory.
class MainSequence(Sequence):
def generate(self, **kargs):
for _ in range(50):
size_bits = RandomUtils.random32(1, 20)
size = 2 ** size_bits
align_bits = RandomUtils.random32(1, size_bits)
align = 2 ** align_bits
arch_mem_attr = self.choice(
("MainRegion", "IORegion", "CacheableShared", "Uncacheable")
)
impl_mem_attr = self.choice(("DMA Controller", "UART 0", "UART 1", "DDR Control"))
start_addr = self.genPA(
Size=size,
Align=align,
Type="D",
MemAttrArch=arch_mem_attr,
MemAttrImpl=impl_mem_attr,
)
end_addr = start_addr + size - 1
if not MemoryTraits.hasMemoryAttribute(arch_mem_attr, start_addr, end_addr):
self.error(
"Memory attribute %s not assigned to physical address range 0x%x-0x%x"
% (arch_mem_attr, start_addr, end_addr)
)
if not MemoryTraits.hasMemoryAttribute(impl_mem_attr, start_addr, end_addr):
self.error(
"Memory attribute %s not assigned to physical address range 0x%x-0x%x"
% (impl_mem_attr, start_addr, end_addr)
)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 38.05
| 94
| 0.638633
|
2939f189207a1f367a5d3fe4de12c42b9730289b
| 2,530
|
py
|
Python
|
tests/integration/setup/test_egg.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-09-17T17:48:55.000Z
|
2019-09-17T17:48:55.000Z
|
tests/integration/setup/test_egg.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/integration/setup/test_egg.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
tests.integration.setup.test_egg
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import shutil
# Import Salt Testing libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
from tests.support.helpers import VirtualEnv, destructiveTest
from tests.support.case import ModuleCase
# Import salt libs
import salt.utils.path
import salt.utils.platform
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
@destructiveTest
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class EggSetupTest(ModuleCase):
'''
Tests for building and installing egg packages
'''
def setUp(self):
# ensure we have a clean build dir
self._clean_build()
def _clean_build(self):
'''
helper method to clean the build dir
'''
dirs = [os.path.join(RUNTIME_VARS.CODE_DIR, 'build'),
os.path.join(RUNTIME_VARS.CODE_DIR, 'salt.egg-info'),
os.path.join(RUNTIME_VARS.CODE_DIR, 'dist')]
for _dir in dirs:
if os.path.exists(_dir):
shutil.rmtree(_dir)
def test_egg_install(self):
'''
test installing an egg package
'''
# Let's create the testing virtualenv
with VirtualEnv() as venv:
ret = self.run_function('cmd.run', ['{0} setup.py install --prefix={1}'.format(venv.venv_python,
venv.venv_dir)],
cwd=RUNTIME_VARS.CODE_DIR)
self._clean_build()
lib_dir = os.path.join(venv.venv_dir, 'lib')
for _dir in os.listdir(lib_dir):
site_pkg = os.path.join(lib_dir, _dir, 'site-packages')
for _file in os.listdir(site_pkg):
if _file.startswith('salt-'):
egg = os.path.join(venv.venv_dir, _file)
assert os.path.exists(os.path.join(site_pkg, _file, 'salt', '_version.py'))
break
# Let's ensure the version is correct
pip_ver = self.run_function('pip.list', bin_env=venv.venv_dir).get('salt')
egg_ver = [x for x in egg.split('/')[-1:][0].split('-') if re.search(r'^\d.\d*', x)][0]
assert pip_ver == egg_ver.replace('_', '-')
| 36.142857
| 108
| 0.582213
|
a423cf9a62eee4d5708d306a9b3a67e45d08a1a9
| 1,449
|
py
|
Python
|
sample_generator/sample_generator/wsgi.py
|
BL-Labs/sample_generator_datatools
|
0815454987dfd6ddfae1b0525685eae6b2bda8c2
|
[
"MIT"
] | 1
|
2020-02-26T17:01:46.000Z
|
2020-02-26T17:01:46.000Z
|
sample_generator/sample_generator/wsgi.py
|
BL-Labs/sample_generator_datatools
|
0815454987dfd6ddfae1b0525685eae6b2bda8c2
|
[
"MIT"
] | null | null | null |
sample_generator/sample_generator/wsgi.py
|
BL-Labs/sample_generator_datatools
|
0815454987dfd6ddfae1b0525685eae6b2bda8c2
|
[
"MIT"
] | null | null | null |
"""
WSGI config for sample_generator project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "sample_generator.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sample_generator.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.909091
| 79
| 0.806763
|
4fcdabf72bb23df3361358d89811a2f4714cc1bd
| 626
|
py
|
Python
|
kirberichuk/wsgi.py
|
kirberich/kirberich.uk
|
cad723408946afc749dd45ffa8adc2b7197aa815
|
[
"Apache-2.0"
] | null | null | null |
kirberichuk/wsgi.py
|
kirberich/kirberich.uk
|
cad723408946afc749dd45ffa8adc2b7197aa815
|
[
"Apache-2.0"
] | null | null | null |
kirberichuk/wsgi.py
|
kirberich/kirberich.uk
|
cad723408946afc749dd45ffa8adc2b7197aa815
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for kirberichuk project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
from kirberichuk.boot import fix_path
fix_path()
import os
from django.core.wsgi import get_wsgi_application
from djangae.wsgi import DjangaeApplication
from djangae.utils import on_production
settings = "kirberichuk.settings_live" if on_production() else "kirberichuk.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings)
application = DjangaeApplication(get_wsgi_application())
| 26.083333
| 85
| 0.809904
|
adb50da013c8d4679214bf962f7267584d17d821
| 5,533
|
py
|
Python
|
merlin/models/tf/prediction_tasks/retrieval.py
|
rhdong/models
|
4b3a1288bff9cc48778e7830d1942f3441bc6c18
|
[
"Apache-2.0"
] | null | null | null |
merlin/models/tf/prediction_tasks/retrieval.py
|
rhdong/models
|
4b3a1288bff9cc48778e7830d1942f3441bc6c18
|
[
"Apache-2.0"
] | null | null | null |
merlin/models/tf/prediction_tasks/retrieval.py
|
rhdong/models
|
4b3a1288bff9cc48778e7830d1942f3441bc6c18
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional, Sequence
import tensorflow as tf
from tensorflow.python.layers.base import Layer
from merlin.models.tf.blocks.core.base import Block, MetricOrMetrics
from merlin.models.tf.blocks.core.transformations import L2Norm, LogitsTemperatureScaler
from merlin.models.tf.blocks.retrieval.base import ItemRetrievalScorer
from merlin.models.tf.blocks.sampling.base import ItemSampler
from merlin.models.tf.blocks.sampling.in_batch import InBatchSampler
from merlin.models.tf.losses import LossType, loss_registry
from merlin.models.tf.metrics.ranking import ranking_metrics
from merlin.models.tf.prediction_tasks.classification import MultiClassClassificationTask
from merlin.schema import Schema, Tags
@tf.keras.utils.register_keras_serializable(package="merlin_models")
class ItemRetrievalTask(MultiClassClassificationTask):
"""Prediction-task for item-retrieval.
Parameters
----------
schema: Schema
The schema object including features to use and their properties.
loss: Optional[LossType]
Loss function.
Defaults to `categorical_crossentropy`.
metrics: MetricOrMetrics
List of top-k ranking metrics.
        Defaults to a number of ranking metrics computed at top-k = 10.
samplers: List[ItemSampler]
List of samplers for negative sampling, by default `[InBatchSampler()]`
extra_pre_call: Optional[PredictionBlock]
Optional extra pre-call block. Defaults to None.
target_name: Optional[str]
If specified, name of the target tensor to retrieve from dataloader.
Defaults to None.
task_name: Optional[str]
name of the task.
Defaults to None.
task_block: Block
The `Block` that applies additional layers op to inputs.
Defaults to None.
logits_temperature: float
        Parameter used to reduce model overconfidence: the logits are divided by T.
Defaults to 1.
normalize: bool
Apply L2 normalization before computing dot interactions.
Defaults to True.
Returns
-------
PredictionTask
The item retrieval prediction task
"""
DEFAULT_LOSS = "categorical_crossentropy"
DEFAULT_METRICS = ranking_metrics(top_ks=[10])
def __init__(
self,
schema: Schema,
loss: Optional[LossType] = DEFAULT_LOSS,
metrics: MetricOrMetrics = DEFAULT_METRICS,
samplers: Sequence[ItemSampler] = (),
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
extra_pre_call: Optional[Block] = None,
logits_temperature: float = 1.0,
normalize: bool = True,
cache_query: bool = False,
**kwargs,
):
self.item_id_feature_name = schema.select_by_tag(Tags.ITEM_ID).column_names[0]
self.cache_query = cache_query
pre = self._build_prediction_call(samplers, normalize, logits_temperature, extra_pre_call)
self.loss = loss_registry.parse(loss)
super().__init__(
loss=self.loss,
metrics=metrics,
target_name=target_name,
task_name=task_name,
task_block=task_block,
pre=pre,
**kwargs,
)
def _build_prediction_call(
self,
samplers: Sequence[ItemSampler],
normalize: bool,
logits_temperature: float,
extra_pre_call: Optional[Block] = None,
):
if samplers is None or len(samplers) == 0:
samplers = (InBatchSampler(),)
prediction_call = ItemRetrievalScorer(
samplers=samplers,
item_id_feature_name=self.item_id_feature_name,
cache_query=self.cache_query,
)
if normalize:
prediction_call = L2Norm().connect(prediction_call)
if logits_temperature != 1:
prediction_call = prediction_call.connect(LogitsTemperatureScaler(logits_temperature))
if extra_pre_call is not None:
prediction_call = prediction_call.connect(extra_pre_call)
return prediction_call
@property
def retrieval_scorer(self):
def find_retrieval_scorer_block(block):
if isinstance(block, ItemRetrievalScorer):
return block
if getattr(block, "layers", None):
for subblock in block.layers:
result = find_retrieval_scorer_block(subblock)
if result:
return result
return None
result = find_retrieval_scorer_block(self.pre)
if result is None:
raise Exception("An ItemRetrievalScorer layer was not found in the model.")
return result
def set_retrieval_cache_query(self, value: bool):
self.retrieval_scorer.cache_query = value
| 35.696774
| 98
| 0.665462
|
af1e5e6a4ccc6ba8c9b6b9bba8665fdd167f3a8e
| 13,342
|
py
|
Python
|
source/rttov_test/profile-datasets-py/div83/015.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
source/rttov_test/profile-datasets-py/div83/015.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T12:19:59.000Z
|
2022-03-12T12:19:59.000Z
|
source/rttov_test/profile-datasets-py/div83/015.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Profile ../profile-datasets-py/div83/015.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/015.py"
self["Q"] = numpy.array([ 2.60257300e+00, 3.31323900e+00, 4.59202900e+00,
5.85201600e+00, 6.37033900e+00, 6.39497900e+00,
6.34082000e+00, 6.15109200e+00, 5.85454600e+00,
5.57718900e+00, 5.34293100e+00, 5.10522400e+00,
4.89287600e+00, 4.69749800e+00, 4.50562000e+00,
4.31062100e+00, 4.22007200e+00, 4.16390300e+00,
4.10717300e+00, 4.04827400e+00, 4.00374400e+00,
3.94528400e+00, 3.80974500e+00, 3.65032700e+00,
3.49392800e+00, 3.34239900e+00, 3.18990000e+00,
3.06484100e+00, 2.97276100e+00, 2.91252200e+00,
2.87716200e+00, 2.85829200e+00, 2.85206200e+00,
2.85262200e+00, 2.86653200e+00, 2.89440200e+00,
2.92905100e+00, 2.97818100e+00, 3.09087000e+00,
3.36254900e+00, 3.97025400e+00, 5.15052300e+00,
6.51731800e+00, 7.43669500e+00, 7.84418800e+00,
7.76357000e+00, 7.15918900e+00, 6.94659200e+00,
7.70454100e+00, 9.14627600e+00, 1.15166700e+01,
1.52030700e+01, 2.03917800e+01, 2.68810800e+01,
3.42915200e+01, 4.14018900e+01, 4.55331300e+01,
4.44639200e+01, 4.88105200e+01, 6.64425900e+01,
9.34987600e+01, 1.20612500e+02, 1.22859900e+02,
1.10025900e+02, 9.93120400e+01, 9.94673100e+01,
1.16921300e+02, 1.33658100e+02, 1.32071600e+02,
9.31999100e+01, 1.52190800e+02, 4.17380700e+02,
9.12682300e+02, 1.49417400e+03, 1.98758200e+03,
2.39674200e+03, 2.86638000e+03, 3.48043400e+03,
4.18654900e+03, 4.63047900e+03, 5.41220900e+03,
6.64231500e+03, 8.51134500e+03, 1.05870200e+04,
1.26706900e+04, 1.46329000e+04, 1.62738800e+04,
1.73780600e+04, 1.79466300e+04, 1.82253700e+04,
1.84237000e+04, 1.86969600e+04, 1.92474600e+04,
1.99298300e+04, 2.09122400e+04, 2.26196100e+04,
2.09907400e+04, 1.77290100e+04, 1.72436400e+04,
1.67778000e+04, 1.63305800e+04])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 379.087 , 379.0867, 379.0883, 379.0888, 379.0926, 379.0966,
379.1106, 379.1437, 379.2008, 379.2939, 379.4 , 379.4881,
379.5551, 379.6502, 379.8123, 379.9984, 380.1794, 380.3094,
380.3734, 380.3845, 380.3975, 380.4365, 380.4796, 380.4986,
380.5367, 380.5877, 380.6998, 380.8298, 381.0139, 381.2229,
381.5639, 381.9299, 382.3879, 382.8789, 383.3309, 383.7789,
384.2509, 384.7399, 385.2578, 385.8047, 386.3815, 386.861 ,
387.2165, 387.5831, 387.656 , 387.733 , 387.7812, 387.8053,
387.831 , 387.8585, 387.8865, 387.8841, 387.8691, 387.8496,
387.8207, 387.7909, 387.7773, 387.7658, 387.7581, 387.7502,
387.7387, 387.7322, 387.7364, 387.7463, 387.7565, 387.7554,
387.7447, 387.7192, 387.6868, 387.6619, 387.588 , 387.4322,
387.1883, 386.912 , 386.6849, 386.4964, 386.3125, 386.0806,
385.8349, 385.639 , 385.1919, 384.4811, 383.4364, 382.3448,
381.2878, 380.468 , 379.8383, 379.4689, 379.3505, 379.398 ,
379.5599, 379.8565, 380.1691, 380.3633, 380.3325, 379.9625,
380.8287, 382.2675, 382.5615, 382.7782, 382.9523])
self["CO"] = numpy.array([ 0.2376114 , 0.2310222 , 0.218331 , 0.1980398 , 0.1700679 ,
0.1362871 , 0.1192042 , 0.1335782 , 0.1315462 , 0.1219733 ,
0.1056614 , 0.08735825, 0.07373554, 0.06684779, 0.06280282,
0.05945484, 0.05643496, 0.05349028, 0.05034389, 0.04686141,
0.04290443, 0.03929514, 0.03609286, 0.03383228, 0.03236879,
0.0313725 , 0.0309121 , 0.03050441, 0.03032101, 0.03015271,
0.03030901, 0.03047731, 0.03235621, 0.0345626 , 0.03711509,
0.04005128, 0.04349627, 0.04765376, 0.05247604, 0.0588576 ,
0.06650864, 0.07631201, 0.08909702, 0.1047132 , 0.1188901 ,
0.1357719 , 0.1486829 , 0.1576819 , 0.1671897 , 0.1747104 ,
0.1828919 , 0.1857452 , 0.1863622 , 0.186569 , 0.1858536 ,
0.1851153 , 0.1843496 , 0.1835628 , 0.1828851 , 0.1822699 ,
0.18177 , 0.1815591 , 0.1813547 , 0.181378 , 0.181401 ,
0.181383 , 0.1813508 , 0.1811158 , 0.1807271 , 0.1807422 ,
0.1813544 , 0.1822349 , 0.183889 , 0.1856292 , 0.1876333 ,
0.1895267 , 0.1905183 , 0.1911623 , 0.1905469 , 0.1901265 ,
0.1899424 , 0.1905489 , 0.1918491 , 0.194571 , 0.1976238 ,
0.2002158 , 0.2015222 , 0.2017637 , 0.2005648 , 0.1995545 ,
0.2008285 , 0.2178238 , 0.2465602 , 0.265208 , 0.2829955 ,
0.3289638 , 0.3663756 , 0.3708289 , 0.3737639 , 0.376753 ,
0.3797987 ])
self["T"] = numpy.array([ 181.415, 188.663, 202.145, 218.39 , 233.079, 245.172,
254.351, 259.775, 261.488, 260.981, 258.676, 256.075,
254.504, 252.532, 248.704, 242.06 , 236.185, 234.165,
233.254, 232.105, 229.946, 228.679, 227.349, 225.985,
224.622, 223.29 , 221.79 , 220.303, 219.109, 218.171,
217.236, 216.025, 215.405, 214.601, 214.03 , 213.394,
212.129, 209.861, 206.794, 203.705, 201.151, 200.35 ,
200.2 , 199.941, 199.328, 199.426, 199.844, 200.707,
202.685, 205.299, 208.104, 210.976, 213.884, 216.839,
219.824, 222.781, 225.636, 228.309, 230.83 , 233.253,
235.693, 238.268, 240.96 , 243.496, 245.877, 248.112,
250.281, 252.378, 254.426, 256.413, 258.16 , 259.825,
261.418, 263.398, 265.364, 267.312, 269.09 , 270.792,
272.433, 274.089, 275.713, 277.314, 278.888, 280.451,
282.003, 283.585, 285.259, 287.074, 288.987, 290.979,
292.996, 295.001, 296.96 , 298.856, 300.265, 300.22 ,
298.116, 295.41 , 295.41 , 295.41 , 295.41 ])
self["N2O"] = numpy.array([ 0.01106997, 0.00804997, 0.00579997, 0.00401998, 0.00346998,
0.00332998, 0.00494997, 0.00742995, 0.01222993, 0.01305993,
0.01701991, 0.0197599 , 0.0213299 , 0.03204985, 0.04187981,
0.05044978, 0.06078974, 0.07533969, 0.08968963, 0.1070196 ,
0.1235295 , 0.1373195 , 0.1495294 , 0.1612194 , 0.1751094 ,
0.1888094 , 0.2019894 , 0.2180193 , 0.2325593 , 0.2440993 ,
0.2548493 , 0.2623893 , 0.2693492 , 0.2758692 , 0.2822692 ,
0.2885592 , 0.2946791 , 0.3005191 , 0.3059991 , 0.310999 ,
0.3154087 , 0.3172084 , 0.3188779 , 0.3203976 , 0.3217275 ,
0.3228275 , 0.3236677 , 0.3241977 , 0.3243875 , 0.324387 ,
0.3243863 , 0.3243851 , 0.3243834 , 0.3243813 , 0.3243789 ,
0.3243766 , 0.3243752 , 0.3243756 , 0.3243742 , 0.3243684 ,
0.3243597 , 0.3243509 , 0.3243501 , 0.3243543 , 0.3243578 ,
0.3243577 , 0.3243521 , 0.3243466 , 0.3243472 , 0.3243598 ,
0.3243406 , 0.3242546 , 0.3240939 , 0.3239053 , 0.3237452 ,
0.3236125 , 0.3234602 , 0.323261 , 0.3230319 , 0.3228879 ,
0.3226343 , 0.3222353 , 0.321629 , 0.3209557 , 0.3202798 ,
0.3196432 , 0.3191109 , 0.3187527 , 0.3185683 , 0.3184779 ,
0.3184135 , 0.3183249 , 0.3181463 , 0.317925 , 0.3176063 ,
0.3170524 , 0.3175808 , 0.3186389 , 0.3187963 , 0.3189474 ,
0.3190925 ])
self["O3"] = numpy.array([ 0.1267017 , 0.1557845 , 0.2501229 , 0.512252 , 0.8582125 ,
1.206732 , 1.57162 , 1.945138 , 2.333926 , 2.861794 ,
3.537261 , 4.321478 , 5.207445 , 6.191161 , 7.281857 ,
8.450114 , 9.120642 , 9.298981 , 9.296422 , 9.227683 ,
9.123983 , 8.950465 , 8.691007 , 8.29654 , 7.742003 ,
7.032976 , 6.22955 , 5.432993 , 4.714096 , 4.090478 ,
3.54823 , 3.082301 , 2.705532 , 2.363023 , 2.088534 ,
1.847445 , 1.630565 , 1.448136 , 1.266676 , 1.024717 ,
0.7358281 , 0.5708771 , 0.4351852 , 0.3303685 , 0.2448851 ,
0.1858396 , 0.1475139 , 0.1324131 , 0.1167451 , 0.1029521 ,
0.09148565, 0.08043998, 0.07131005, 0.06514055, 0.06166879,
0.05981822, 0.05835204, 0.05765764, 0.05764909, 0.05690312,
0.05572029, 0.05508016, 0.05663374, 0.05988041, 0.06264158,
0.06385205, 0.06305123, 0.06252034, 0.06201811, 0.06136308,
0.06096012, 0.06078322, 0.06084332, 0.06116587, 0.06184763,
0.06287644, 0.06377657, 0.06421373, 0.06433503, 0.06509338,
0.06660824, 0.06830357, 0.06961192, 0.06927147, 0.06783979,
0.06564033, 0.06334429, 0.06185124, 0.06144276, 0.06150101,
0.06122455, 0.05986577, 0.05667681, 0.05184454, 0.04753902,
0.04286008, 0.04251641, 0.02627408, 0.02628706, 0.02629952,
0.02631148])
self["CH4"] = numpy.array([ 0.4414649, 0.3630808, 0.3048156, 0.2650684, 0.2894662,
0.318894 , 0.3981865, 0.4693411, 0.5360639, 0.5988167,
0.6815104, 0.7585961, 0.8289109, 0.9617915, 1.073095 ,
1.141665 , 1.200855 , 1.245365 , 1.289035 , 1.340305 ,
1.389154 , 1.430214 , 1.466694 , 1.501645 , 1.539125 ,
1.575665 , 1.610815 , 1.628705 , 1.640655 , 1.649245 ,
1.658465 , 1.668335 , 1.678875 , 1.687555 , 1.695915 ,
1.703805 , 1.711065 , 1.717515 , 1.723215 , 1.729224 ,
1.735553 , 1.742201 , 1.749179 , 1.768077 , 1.769486 ,
1.770966 , 1.772367 , 1.773738 , 1.775446 , 1.779034 ,
1.782759 , 1.785693 , 1.788334 , 1.791122 , 1.794098 ,
1.797186 , 1.800868 , 1.80471 , 1.807842 , 1.81047 ,
1.81282 , 1.814371 , 1.816017 , 1.81791 , 1.819849 ,
1.821469 , 1.822967 , 1.824636 , 1.826509 , 1.82841 ,
1.830001 , 1.831085 , 1.831047 , 1.83082 , 1.830425 ,
1.830233 , 1.830498 , 1.830626 , 1.83143 , 1.832734 ,
1.833473 , 1.833132 , 1.83127 , 1.828742 , 1.826115 ,
1.824338 , 1.823287 , 1.823383 , 1.824233 , 1.825109 ,
1.825526 , 1.825283 , 1.824445 , 1.823332 , 1.822151 ,
1.819521 , 1.822984 , 1.829372 , 1.830462 , 1.831399 ,
1.832232 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 295.41
self["S2M"]["Q"] = 16330.5845347
self["S2M"]["O"] = 0.0263114846257
self["S2M"]["P"] = 1007.30103
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 295.41
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 29.794
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 9, 1])
self["TIME"] = numpy.array([0, 0, 0])
| 57.508621
| 92
| 0.566931
|
10cfed9c17f9a02f08ea05b9683fb782ace42a0a
| 617
|
py
|
Python
|
release.py
|
RolandMQuiros/DeBroglie
|
6e9d94d844f4a24ae919ab61f042c65ef5834e97
|
[
"MIT"
] | null | null | null |
release.py
|
RolandMQuiros/DeBroglie
|
6e9d94d844f4a24ae919ab61f042c65ef5834e97
|
[
"MIT"
] | null | null | null |
release.py
|
RolandMQuiros/DeBroglie
|
6e9d94d844f4a24ae919ab61f042c65ef5834e97
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import shutil
# Build the project
subprocess.check_call(["dotnet","build","DeBroglie.Console/DeBroglie.Console.csproj","-c","Release"])
# Build the docs
subprocess.check_call(["docfx","docs/docfx.json"])
# Move everything to a fresh folder
shutil.rmtree("release", ignore_errors=True)
shutil.copytree("DeBroglie.Console/bin/Release", "release/bin")
shutil.copytree("docs-generated", "release/docs")
shutil.copy("README.md", "release")
shutil.copy("LICENSE.txt", "release")
# zip it up
shutil.make_archive("release", "zip", "release")
# Cleanup
shutil.rmtree("release", ignore_errors=True)
| 34.277778
| 101
| 0.756888
|
52e46394cea62f5b9639c425462b482b5b8adbfa
| 2,734
|
py
|
Python
|
youtuatools/extractor/wakanim.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 47
|
2021-01-02T07:44:50.000Z
|
2022-02-28T22:02:13.000Z
|
youtuatools/extractor/wakanim.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 4
|
2021-02-07T03:35:13.000Z
|
2021-10-31T19:23:53.000Z
|
youtuatools/extractor/wakanim.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 8
|
2021-01-03T05:44:39.000Z
|
2021-11-01T05:46:32.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
merge_dicts,
urljoin,
)
class WakanimIE(InfoExtractor):
_VALID_URL = r"https://(?:www\.)?wakanim\.tv/[^/]+/v2/catalogue/episode/(?P<id>\d+)"
_TESTS = [
{
"url": "https://www.wakanim.tv/de/v2/catalogue/episode/2997/the-asterisk-war-omu-staffel-1-episode-02-omu",
"info_dict": {
"id": "2997",
"ext": "mp4",
"title": "Episode 02",
"description": "md5:2927701ea2f7e901de8bfa8d39b2852d",
"series": "The Asterisk War (OmU.)",
"season_number": 1,
"episode": "Episode 02",
"episode_number": 2,
},
"params": {
"format": "bestvideo",
"skip_download": True,
},
},
{
# DRM Protected
"url": "https://www.wakanim.tv/de/v2/catalogue/episode/7843/sword-art-online-alicization-omu-arc-2-folge-15-omu",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m3u8_url = urljoin(
url,
self._search_regex(
r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage,
"m3u8 url",
group="url",
),
)
# https://docs.microsoft.com/en-us/azure/media-services/previous/media-services-content-protection-overview#streaming-urls
encryption = self._search_regex(
r"encryption%3D(c(?:enc|bc(?:s-aapl)?))",
m3u8_url,
"encryption",
default=None,
)
if encryption and encryption in ("cenc", "cbcs-aapl"):
raise ExtractorError("This video is DRM protected.", expected=True)
formats = self._extract_m3u8_formats(
m3u8_url, video_id, "mp4", entry_protocol="m3u8_native", m3u8_id="hls"
)
info = self._search_json_ld(webpage, video_id, default={})
title = self._search_regex(
(
r'<h1[^>]+\bclass=["\']episode_h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
r'<span[^>]+\bclass=["\']episode_title["\'][^>]*>(?P<title>[^<]+)',
),
webpage,
"title",
default=None,
group="title",
)
return merge_dicts(
info,
{
"id": video_id,
"title": title,
"formats": formats,
},
)
| 31.068182
| 130
| 0.483906
|
8491062904216fe5a471c5cdb0afd4a6ff5c9518
| 338
|
py
|
Python
|
pdb2pqr/pdb2pqr.py
|
Electrostatics/apbs-etl
|
66b6a34e3ebf4ffae921a85344c07e83cc2ad1a9
|
[
"BSD-3-Clause"
] | 1
|
2021-09-30T20:46:06.000Z
|
2021-09-30T20:46:06.000Z
|
pdb2pqr/pdb2pqr.py
|
Electrostatics/apbs-etl
|
66b6a34e3ebf4ffae921a85344c07e83cc2ad1a9
|
[
"BSD-3-Clause"
] | 23
|
2021-09-30T18:56:53.000Z
|
2022-01-04T00:34:41.000Z
|
pdb2pqr/pdb2pqr.py
|
Electrostatics/apbs-etl
|
66b6a34e3ebf4ffae921a85344c07e83cc2ad1a9
|
[
"BSD-3-Clause"
] | null | null | null |
"""TODO """
from typing import List
from pdbx.containers import DataContainer
from .io import read_input
from .process_cli import process_cli
def main():
"""Hook for command-line usage."""
args = process_cli()
data_containers: List[DataContainer] = read_input(args.input_file)
# transform_data()
# write_output()
| 19.882353
| 70
| 0.715976
|
fc8ace54483a754bf1a41b20fb89da27ffd60ac8
| 85
|
py
|
Python
|
tests/context.py
|
threatlead/amazonscraper
|
f8b520cfcadf661c0ce587dcbbd3581e8b094b4a
|
[
"MIT"
] | null | null | null |
tests/context.py
|
threatlead/amazonscraper
|
f8b520cfcadf661c0ce587dcbbd3581e8b094b4a
|
[
"MIT"
] | null | null | null |
tests/context.py
|
threatlead/amazonscraper
|
f8b520cfcadf661c0ce587dcbbd3581e8b094b4a
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import amazonscraper
| 14.166667
| 41
| 0.741176
|
90becd6d00bbb48d0c3cccc3a56026653effa77a
| 9,598
|
py
|
Python
|
tensorboard/plugins/graph/graphs_plugin_test.py
|
SongzzZ/tensorboard
|
4ee9ae8a31524131eb56f7a1dc4aa09d7d186f10
|
[
"Apache-2.0"
] | 1
|
2020-02-20T05:37:55.000Z
|
2020-02-20T05:37:55.000Z
|
tensorboard/plugins/graph/graphs_plugin_test.py
|
SongzzZ/tensorboard
|
4ee9ae8a31524131eb56f7a1dc4aa09d7d186f10
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/plugins/graph/graphs_plugin_test.py
|
SongzzZ/tensorboard
|
4ee9ae8a31524131eb56f7a1dc4aa09d7d186f10
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Graphs Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import functools
import os.path
import tensorflow as tf
from google.protobuf import text_format
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.compat.proto import config_pb2
from tensorboard.plugins import base_plugin
from tensorboard.plugins.graph import graphs_plugin
from tensorboard.util import test_util
tf.compat.v1.disable_v2_behavior()
# TODO(stephanwlee): Move more tests into the base class when v2 test
# can write graph and metadata with a TF public API.
_RUN_WITH_GRAPH_WITH_METADATA = ('_RUN_WITH_GRAPH_WITH_METADATA', True, True)
_RUN_WITHOUT_GRAPH_WITH_METADATA = ('_RUN_WITHOUT_GRAPH_WITH_METADATA', False, True)
_RUN_WITH_GRAPH_WITHOUT_METADATA = ('_RUN_WITH_GRAPH_WITHOUT_METADATA', True, False)
_RUN_WITHOUT_GRAPH_WITHOUT_METADATA = ('_RUN_WITHOUT_GRAPH_WITHOUT_METADATA', False, False)
def with_runs(run_specs):
"""Run a test with a bare multiplexer and with a `data_provider`.
The decorated function will receive an initialized `GraphsPlugin`
object as its first positional argument.
The receiver argument of the decorated function must be a `TestCase` instance
    that also provides `load_runs`.
"""
def decorator(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
(logdir, multiplexer) = self.load_runs(run_specs)
with self.subTest('bare multiplexer'):
ctx = base_plugin.TBContext(logdir=logdir, multiplexer=multiplexer)
fn(self, graphs_plugin.GraphsPlugin(ctx), *args, **kwargs)
with self.subTest('generic data provider'):
flags = argparse.Namespace(generic_data='true')
provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)
ctx = base_plugin.TBContext(
flags=flags,
logdir=logdir,
multiplexer=multiplexer,
data_provider=provider,
)
fn(self, graphs_plugin.GraphsPlugin(ctx), *args, **kwargs)
return wrapper
return decorator
class GraphsPluginBaseTest(object):
_METADATA_TAG = 'secret-stats'
_MESSAGE_PREFIX_LENGTH_LOWER_BOUND = 1024
def __init__(self, *args, **kwargs):
super(GraphsPluginBaseTest, self).__init__(*args, **kwargs)
self.plugin = None
def setUp(self):
super(GraphsPluginBaseTest, self).setUp()
def generate_run(self, logdir, run_name, include_graph, include_run_metadata):
"""Create a run"""
raise NotImplementedError('Please implement generate_run')
def load_runs(self, run_specs):
logdir = self.get_temp_dir()
for run_spec in run_specs:
self.generate_run(logdir, *run_spec)
return self.bootstrap_plugin(logdir)
def bootstrap_plugin(self, logdir):
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(logdir)
multiplexer.Reload()
return (logdir, multiplexer)
@with_runs([_RUN_WITH_GRAPH_WITH_METADATA, _RUN_WITHOUT_GRAPH_WITH_METADATA])
def testRoutesProvided(self, plugin):
"""Tests that the plugin offers the correct routes."""
routes = plugin.get_plugin_apps()
self.assertIsInstance(routes['/graph'], collections.Callable)
self.assertIsInstance(routes['/run_metadata'], collections.Callable)
self.assertIsInstance(routes['/info'], collections.Callable)
class GraphsPluginV1Test(GraphsPluginBaseTest, tf.test.TestCase):
def generate_run(self, logdir, run_name, include_graph, include_run_metadata):
"""Create a run with a text summary, metadata, and optionally a graph."""
tf.compat.v1.reset_default_graph()
k1 = tf.constant(math.pi, name='k1')
k2 = tf.constant(math.e, name='k2')
result = (k1 ** k2) - k1
expected = tf.constant(20.0, name='expected')
error = tf.abs(result - expected, name='error')
message_prefix_value = 'error ' * 1000
true_length = len(message_prefix_value)
assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
message_prefix = tf.constant(message_prefix_value, name='message_prefix')
error_message = tf.strings.join([message_prefix,
tf.as_string(error, name='error_string')],
name='error_message')
summary_message = tf.compat.v1.summary.text('summary_message', error_message)
sess = tf.compat.v1.Session()
writer = test_util.FileWriter(os.path.join(logdir, run_name))
if include_graph:
writer.add_graph(sess.graph)
options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
s = sess.run(summary_message, options=options, run_metadata=run_metadata)
writer.add_summary(s)
if include_run_metadata:
writer.add_run_metadata(run_metadata, self._METADATA_TAG)
writer.close()
def _get_graph(self, plugin, *args, **kwargs):
"""Set up runs, then fetch and return the graph as a proto."""
(graph_pbtxt, mime_type) = plugin.graph_impl(
_RUN_WITH_GRAPH_WITH_METADATA[0], *args, **kwargs)
self.assertEqual(mime_type, 'text/x-protobuf')
return text_format.Parse(graph_pbtxt, tf.compat.v1.GraphDef())
@with_runs([
_RUN_WITH_GRAPH_WITH_METADATA,
_RUN_WITH_GRAPH_WITHOUT_METADATA,
_RUN_WITHOUT_GRAPH_WITH_METADATA,
_RUN_WITHOUT_GRAPH_WITHOUT_METADATA])
def test_info(self, plugin):
expected = {
'_RUN_WITH_GRAPH_WITH_METADATA': {
'run': '_RUN_WITH_GRAPH_WITH_METADATA',
'run_graph': True,
'tags': {
'secret-stats': {
'conceptual_graph': False,
'profile': True,
'tag': 'secret-stats',
'op_graph': False,
},
},
},
'_RUN_WITH_GRAPH_WITHOUT_METADATA': {
'run': '_RUN_WITH_GRAPH_WITHOUT_METADATA',
'run_graph': True,
'tags': {},
}
}
if not plugin._data_provider:
# Hack, for now.
expected['_RUN_WITHOUT_GRAPH_WITH_METADATA'] = {
'run': '_RUN_WITHOUT_GRAPH_WITH_METADATA',
'run_graph': False,
'tags': {
'secret-stats': {
'conceptual_graph': False,
'profile': True,
'tag': 'secret-stats',
'op_graph': False,
},
},
}
self.assertItemsEqual(expected, plugin.info_impl('eid'))
@with_runs([_RUN_WITH_GRAPH_WITH_METADATA])
def test_graph_simple(self, plugin):
graph = self._get_graph(
plugin,
tag=None,
is_conceptual=False,
experiment='eid',
)
node_names = set(node.name for node in graph.node)
self.assertEqual({
'k1', 'k2', 'pow', 'sub', 'expected', 'sub_1', 'error',
'message_prefix', 'error_string', 'error_message', 'summary_message',
'summary_message/tag', 'summary_message/serialized_summary_metadata',
}, node_names)
@with_runs([_RUN_WITH_GRAPH_WITH_METADATA])
def test_graph_large_attrs(self, plugin):
key = 'o---;;-;'
graph = self._get_graph(
plugin,
tag=None,
is_conceptual=False,
experiment='eid',
limit_attr_size=self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND,
large_attrs_key=key)
large_attrs = {
node.name: list(node.attr[key].list.s)
for node in graph.node
if key in node.attr
}
self.assertEqual({'message_prefix': [b'value']},
large_attrs)
@with_runs([_RUN_WITH_GRAPH_WITH_METADATA])
def test_run_metadata(self, plugin):
result = plugin.run_metadata_impl(
_RUN_WITH_GRAPH_WITH_METADATA[0], self._METADATA_TAG)
if plugin._data_provider:
# Hack, for now
self.assertEqual(result, None)
else:
(metadata_pbtxt, mime_type) = result
self.assertEqual(mime_type, 'text/x-protobuf')
text_format.Parse(metadata_pbtxt, config_pb2.RunMetadata())
# If it parses, we're happy.
@with_runs([_RUN_WITH_GRAPH_WITHOUT_METADATA])
def test_is_active_with_graph_without_run_metadata(self, plugin):
self.assertTrue(plugin.is_active())
@with_runs([_RUN_WITHOUT_GRAPH_WITH_METADATA])
def test_is_active_without_graph_with_run_metadata(self, plugin):
self.assertTrue(plugin.is_active())
@with_runs([_RUN_WITH_GRAPH_WITH_METADATA])
def test_is_active_with_both(self, plugin):
self.assertTrue(plugin.is_active())
@with_runs([_RUN_WITHOUT_GRAPH_WITHOUT_METADATA])
def test_is_inactive_without_both(self, plugin):
if plugin._data_provider:
# Hack, for now.
self.assertTrue(plugin.is_active())
else:
self.assertFalse(plugin.is_active())
if __name__ == '__main__':
tf.test.main()
| 36.356061
| 127
| 0.698583
|
e132c4c4161daf831aaf2b929b760b62ade255f1
| 865
|
py
|
Python
|
test_master.py
|
markkorput/pyOmxSync
|
790c9c96767b19377e89753fc2095e34847d62bf
|
[
"MIT"
] | 3
|
2018-03-16T12:08:56.000Z
|
2021-02-12T16:03:01.000Z
|
test_master.py
|
markkorput/pyOmxSync
|
790c9c96767b19377e89753fc2095e34847d62bf
|
[
"MIT"
] | 8
|
2016-09-04T19:10:33.000Z
|
2016-09-07T05:52:03.000Z
|
test_master.py
|
markkorput/pyOmxSync
|
790c9c96767b19377e89753fc2095e34847d62bf
|
[
"MIT"
] | 6
|
2017-06-07T10:22:14.000Z
|
2019-09-21T02:33:05.000Z
|
#!/usr/bin/env python2
try:
from omxplayer import OMXPlayer
except ImportError:
print("\n!! Could not import OMXPlayer, see https://github.com/willprice/python-omxplayer-wrapper for install instructions !!\n")
from omxsync import Broadcaster
import sys
if len(sys.argv) < 2:
print('No video specified\nUsage: python test_master.py path/to/video.mp4\n')
sys.exit(1)
try:
print("Load player")
player = OMXPlayer(sys.argv[1])
print("Load broadcaster")
broadcaster = Broadcaster(player, {'verbose': True})
broadcaster.setup()
print("Start playing")
player.play()
while player.playback_status() != "Stopped":
broadcaster.update()
print("Stop player")
player.stop()
print("Exit")
except KeyboardInterrupt:
player.quit()
except Exception as err:
print("An error occured:")
print(err)
| 23.378378
| 133
| 0.682081
|
ccb0f4f458e57bdeadc14906e9ffd130eee1c0fa
| 12,529
|
py
|
Python
|
Tony/clviz_web_tony_edits/densitygraph.py
|
NeuroDataDesign/seelviz-archive
|
cb9bcf7c0f32f0256f71be59dd7d7a9086d0f3b3
|
[
"Apache-2.0"
] | null | null | null |
Tony/clviz_web_tony_edits/densitygraph.py
|
NeuroDataDesign/seelviz-archive
|
cb9bcf7c0f32f0256f71be59dd7d7a9086d0f3b3
|
[
"Apache-2.0"
] | 2
|
2017-04-18T02:50:14.000Z
|
2017-04-18T18:04:20.000Z
|
Tony/clviz_web_tony_edits/densitygraph.py
|
NeuroDataDesign/seelviz-archive
|
cb9bcf7c0f32f0256f71be59dd7d7a9086d0f3b3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
# import matplotlib as mpl
# mpl.use('Agg')
from skimage import data, img_as_float
from skimage import exposure
import plotly
from plotly.graph_objs import *
import cv2
import collections as col
import math, os, gc
import numpy as np
import nibabel as nib
# Tony's get_brain_figure stuff
from plotly.offline import download_plotlyjs#, init_notebook_mode, iplot
from plotly import tools
#plotly.offline.init_notebook_mode()
import networkx as nx
import pandas as pd
import re
class densitygraph(object):
"""This class includes all the calculations nad operations necessary to go from a graphml of the brain to a graph that includes edges and is colored according to density of nodes."""
#def generate_density_graph(self):
#def get_brain_figure(self, g, plot_title=''):
#def generate_heat_map(self):
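    # Typical flow (sketch; 'Fear199' is a placeholder token, and the graphml
    # file is assumed to live at token/token.graphml as in __init__ below):
    #
    #   dg = densitygraph('Fear199')
    #   dg.generate_density_graph()
    #   fig = dg.get_brain_figure(dg._graph, plot_title='density')
    #   heatmap_fig = dg.generate_heat_map()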
def __init__(self, token, graph_path = None):
self._token = token
if graph_path == None:
self._graph = nx.read_graphml(token + '/' + token + '.graphml')
else:
self._graph = graph_path
self._sortedList = None
self._maxEdges = 0
self._scaledEdges = 0
self._heatmapbrain = None
def generate_density_graph(self):
## This finds the maximum number of edges and the densest node.
G = self._graph
maxEdges = 0
densestNode = ""
for i in range(len(G.edges())):
if ((len(G.edges('s' + str(i))) > maxEdges)):
maxEdges = len(G.edges('s' + str(i)))
densestNode = "s" + str(i)
## Find each node with a given number of edges, from 0 edges to maxEdges
## Find and store number of edges for each node in storageDict
## Key is 's1', value is number of edges
storageDict = {}
for n in G.nodes_iter():
storageDict[n] = len(G.edges(n))
orderedNodesEdgeCounts = col.OrderedDict(sorted(storageDict.items(), key=lambda (key, value): int(key.split('s')[1])))
## Create ordered list to visualize data
sortedList = sorted(storageDict.values())
# Calculate basic statistics
statisticsArray = np.array(sortedList)
averageNumberEdges = np.mean(statisticsArray)
stdNumberEdges = np.std(statisticsArray)
print("average edge count:")
print(averageNumberEdges)
print("standard deviation edge count: ")
print(stdNumberEdges)
# using 95th percentile as upper limit (z = 1.96)
upperLimit = averageNumberEdges + 1.96 * stdNumberEdges
print("95th percentile: ")
print(upperLimit)
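        # For intuition: with a mean of 40 edges per node and a standard
        # deviation of 10, this cutoff works out to 40 + 1.96 * 10 = 59.6.
        # (These numbers are made up purely for illustration.)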
##unused
## numberEdges is used for plotting (first element is edges for 's1', etc.)
numberEdges = []
k = 0
for i in range(1, (len(G.nodes()) + 1)):
numberEdges.append(orderedNodesEdgeCounts['s' + str(i)])
k = k + 1
## Number of colors is maxEdges
numColors = maxEdges;
# scaledEdges = [float(numberEdges[i])/float(upperLimit) for i in range(len(numberEdges))]
self._scaledEdges = [float(numberEdges[i])/float(maxEdges) for i in range(len(numberEdges))]
##Tweak this to change the heat map scaling for the points. Remove outliers.
##False coloration heatmap below. I've commented it out in this version b/c the rainbows
##are difficult to interpret. I've included a newer red version.
'''
self._heatMapBrain = [
# Let null values (0.0) have color rgb(0, 0, 0)
[0, 'rgb(0, 0, 0)'], #black
# Let first 5-10% (0.05) of the values have color rgb(204, 0, 204)
[0.05, 'rgb(153, 0, 153)'], #purple
[0.1, 'rgb(153, 0, 153)'], #purple
# Let next 10-15% (0.05) of the values have color rgb(204, 0, 204)
[0.1, 'rgb(204, 0, 204)'], #purple
[0.15, 'rgb(204, 0, 204)'], #purple
# Let values between 20-25% have color rgb(0, 0, 153)
[0.15, 'rgb(0, 0, 153)'], #blue
[0.2, 'rgb(0, 0, 153)'], #blue
# Let values between 25-30% have color rgb(0, 0, 204)
[0.2, 'rgb(0, 0, 204)'], #blue
[0.25, 'rgb(0, 0, 204)'], #blue
[0.25, 'rgb(0, 76, 153)'], #blue
[0.3, 'rgb(0, 76, 153)'], #blue
[0.3, 'rgb(0, 102, 204)'], #light blue
[0.35, 'rgb(0, 102, 204)'], #light blue
[0.35, 'rgb(0, 153, 153)'], #light blue
[0.4, 'rgb(0, 153, 153)'], #light blue
[0.4, 'rgb(0, 204, 204)'], #light blue
[0.45, 'rgb(0, 204, 204)'], #light blue
[0.45, 'rgb(0, 153, 76)'],
[0.5, 'rgb(0, 153, 76)'],
[0.5, 'rgb(0, 204, 102)'],
[0.55, 'rgb(0, 204, 102)'],
[0.55, 'rgb(0, 255, 0)'],
[0.6, 'rgb(0, 255, 0)'],
[0.6, 'rgb(128, 255, 0)'],
[0.65, 'rgb(128, 255, 0)'],
[0.65, 'rgb(255, 255, 0)'],
[0.7, 'rgb(255, 255, 0)'],
[0.7, 'rgb(255, 255, 102)'], #
[0.75, 'rgb(255, 255, 102)'], #
[0.75, 'rgb(255, 128, 0)'],
[0.8, 'rgb(255, 128, 0)'],
[0.8, 'rgb(204, 0, 0)'], #
[0.85, 'rgb(204, 0, 0)'], #
[0.85, 'rgb(255, 0, 0)'],
[0.9, 'rgb(255, 0, 0)'],
[0.9, 'rgb(255, 51, 51)'], #
[0.95, 'rgb(255, 51, 51)'], #
[0.95, 'rgb(255, 255, 255)'],
[1.0, 'rgb(255, 255, 255)']
]
'''
self._heatMapBrain = [
# Let null values (0.0) have color rgb(0, 0, 0)
[0, 'rgb(0, 0, 0)'], #black
[0.1, '#7f0000'],
[0.2, '#7f0000'],
[0.2, '#b30000'],
[0.3, '#b30000'],
[0.3, '#d7301f'],
[0.4, '#d7301f'],
[0.4, '#ef6548'],
[0.5, '#ef6548'],
[0.5, '#fc8d59'],
[0.6, '#fc8d59'],
[0.6, '#fdbb84'],
[0.7, '#fdbb84'],
[0.7, '#fdd49e'],
[0.8, '#fdd49e'],
[0.8, '#fee8c8'],
[0.9, '#fee8c8'],
[0.9, '#fff7ec'],
[1.0, '#fff7ec']
]
self._sortedList = sortedList
self._maxEdges = maxEdges
#figure = self.get_brain_figure(G, '')
#plotly.offline.plot(figure, filename = self._token + '/' + self._token + '_density.html')
def get_brain_figure(self, g, plot_title=''):
"""
Returns the plotly figure object for vizualizing a 3d brain network.
g: networkX object of brain
"""
# grab the node positions from the graphML file
V = nx.number_of_nodes(g)
attributes = nx.get_node_attributes(g,'attr')
node_positions_3d = pd.DataFrame(columns=['x', 'y', 'z'], index=range(V))
for n in g.nodes_iter():
node_positions_3d.loc[n] = [int((re.findall('\d+', str(attributes[n])))[0]), int((re.findall('\d+', str(attributes[n])))[1]), int((re.findall('\d+', str(attributes[n])))[2])]
# grab edge endpoints
edge_x = []
edge_y = []
edge_z = []
for e in g.edges_iter():
source_pos = node_positions_3d.loc[e[0]]
target_pos = node_positions_3d.loc[e[1]]
edge_x += [source_pos['x'], target_pos['x'], None]
edge_y += [source_pos['y'], target_pos['y'], None]
edge_z += [source_pos['z'], target_pos['z'], None]
Xlist = []
for i in range(1, len(g.nodes()) + 1):
Xlist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[0]))
Ylist = []
for i in range(1, len(g.nodes()) + 1):
Ylist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[1]))
Zlist = []
for i in range(1, len(g.nodes()) + 1):
Zlist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[2]))
# node style
node_trace = Scatter3d(x=Xlist,
y=Ylist,
z=Zlist,
mode='markers',
# name='regions',
marker=Marker(symbol='dot',
size=6,
opacity=0,
color=self._scaledEdges,
colorscale=self._heatMapBrain),
# text=[str(r) for r in range(V)],
# text=atlas_data['nodes'],
hoverinfo='text')
# edge style
'''edge_trace = Scatter3d(x=edge_x,
y=edge_y,
z=edge_z,
mode='lines',
line=Line(color='cyan', width=1),
hoverinfo='none')'''
# axis style
axis = dict(showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False)
# overall layout
layout = Layout(title=plot_title,
width=800,
height=900,
showlegend=False,
scene=Scene(xaxis=XAxis(axis),
yaxis=YAxis(axis),
zaxis=ZAxis(axis)),
margin=Margin(t=50),
hovermode='closest',
paper_bgcolor='rgba(1,1,1,1)',
plot_bgcolor='rgb(1,1,1)')
data = Data([node_trace])
fig = Figure(data=data, layout=layout)
return fig
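# Illustrative usage (hypothetical variable names, mirroring the commented-out call near the top
# of this class):
#   figure = self.get_brain_figure(G, plot_title='density')
#   plotly.offline.plot(figure, filename=self._token + '/' + self._token + '_density.html')
# where G is the networkx graph loaded from the subject's graphML file.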
def generate_heat_map(self):
# Get list of all possible number of edges, in order
setOfAllPossibleNumEdges = set(self._sortedList)
listOfAllPossibleNumEdges = list(setOfAllPossibleNumEdges)
#listOfAllScaledEdgeValues = [listOfAllPossibleNumEdges[i]/upperLimit for i in range(len(listOfAllPossibleNumEdges))]
listOfAllScaledEdgeValues = [listOfAllPossibleNumEdges[i]/float(self._maxEdges) for i in range(len(listOfAllPossibleNumEdges))]
#heatMapBrain
data = Data([
Scatter(
y=listOfAllPossibleNumEdges,
marker=Marker(
size=16,
color=listOfAllPossibleNumEdges,
colorbar=ColorBar(
title='Colorbar'
),
colorscale=self._heatMapBrain,
),
mode='markers')
])
layout = Layout(title=self._token + ' false coloration scheme',
width=800,
height=900,
showlegend=False,
margin=Margin(t=50),
hovermode='closest',
xaxis=dict(
title='Number of Unique Colors',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#000000')
),
yaxis=dict(
title='Number of Edges',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#000000')
),
paper_bgcolor='rgba(255,255,255,255)',
plot_bgcolor='rgb(255,255,255)')
mapping = Figure(data=data, layout=layout)
#iplot(mapping, validate=False)
#plotly.offline.plot(mapping, filename = self._token + '/' + self._token + 'heatmap' + '.html')
return mapping
| 35.59375
| 186
| 0.466757
|
0d5e62452e2177ecf590d9c51b26e791defaa506
| 33,069
|
py
|
Python
|
protectwise_connector.py
|
splunk-soar-connectors/protectwise
|
9c72074bfd33a26754034d7c0b94fe3009d99d2e
|
[
"Apache-2.0"
] | null | null | null |
protectwise_connector.py
|
splunk-soar-connectors/protectwise
|
9c72074bfd33a26754034d7c0b94fe3009d99d2e
|
[
"Apache-2.0"
] | null | null | null |
protectwise_connector.py
|
splunk-soar-connectors/protectwise
|
9c72074bfd33a26754034d7c0b94fe3009d99d2e
|
[
"Apache-2.0"
] | null | null | null |
# File: protectwise_connector.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom imports
import json
import os
import tempfile
import time
from datetime import datetime, timedelta
import phantom.app as phantom
import phantom.rules as ph_rules
import requests
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
from protectwise_consts import *
class ProtectWiseConnector(BaseConnector):
def __init__(self):
# Call the super class
super(ProtectWiseConnector, self).__init__()
self._headers = None
self._base_url = PW_BASE_URL
self._state = {}
def is_positive_non_zero_int(self, value):
try:
value = int(value)
return value > 0
except Exception:
return False
def initialize(self):
self._state = self.load_state()
config = self.get_config()
self._headers = {'X-Access-Token': config[PW_JSON_AUTH_TOKEN]}
self._display_dup_artifacts = config.get(PW_JSON_ALLOW_ARTIFACT_DUPLICATES)
self._display_dup_containers = config.get(PW_JSON_ALLOW_CONTAINER_DUPLICATES)
return phantom.APP_SUCCESS
def finalize(self):
self.save_state(self._state)
return phantom.APP_SUCCESS
def _get_sensor_list(self, action_result):
ret_val, resp_json = self._make_rest_call('/sensors', action_result)
if (phantom.is_fail(ret_val)):
return (action_result.get_status(), None)
return (phantom.APP_SUCCESS, resp_json)
def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get", exception_error_codes=[]):
""" Function that makes the REST call to the device, generic function that can be called from various action handlers
Needs to return two values, 1st the phantom.APP_[SUCCESS|ERROR], 2nd the response
"""
if headers is None:
headers = {}
# Get the config
config = self.get_config()
# Create the headers
headers.update(self._headers)
if (method in ['put', 'post']):
headers.update({'Content-Type': 'application/json'})
resp_json = None
# get or post or put, whatever the caller asked us to use, if not specified the default will be 'get'
request_func = getattr(requests, method)
# handle the error in case the caller specified a non-existent method
if (not request_func):
return (action_result.set_status(phantom.APP_ERROR, PW_ERR_API_UNSUPPORTED_METHOD, method=method), resp_json)
# Make the call
try:
r = request_func(self._base_url + endpoint, # The complete url is made up of the base_url, the api url and the endpoint
data=json.dumps(data) if data else None, # the data, converted to json string format if present, else just set to None
headers=headers, # The headers to send in the HTTP call
verify=config.get(phantom.APP_JSON_VERIFY, True), # should cert verification be carried out?
params=params) # uri parameters if any
except Exception as e:
return (action_result.set_status(phantom.APP_ERROR, PW_ERR_SERVER_CONNECTION, e), resp_json)
# self.debug_print('REST url: {0}'.format(r.url))
if (hasattr(action_result, 'add_debug_data')):
action_result.add_debug_data({'r_text': r.text if r else 'r is None'})
# Try a json parse, since most REST API's give back the data in json,
# if the device does not return JSONs, then need to implement parsing them some other manner
try:
resp_json = r.json()
except Exception as e:
# r.text is guaranteed to be non-None; it may be empty, but never None
msg_string = PW_ERR_JSON_PARSE.format(raw_text=r.text)
return (action_result.set_status(phantom.APP_ERROR, msg_string, e), resp_json)
# Handle any special HTTP status codes here. Some endpoints return codes such as 204 (No Content)
# that a caller may want to treat as success with an empty body; handle them here before the
# generic error handling below, uncommenting the following lines in such cases
# if (r.status_code == 201):
# return (phantom.APP_SUCCESS, resp_json)
# Handle/process any errors that we get back from the device
if (200 <= r.status_code <= 399):
# Success
return (phantom.APP_SUCCESS, resp_json)
# Failure
action_result.add_data(resp_json)
details = json.dumps(resp_json).replace('{', '').replace('}', '')
if (r.status_code in exception_error_codes):
# Ok to have this http error for this call, return success, the caller will handle the fact that the response is empty
return (action_result.set_status(phantom.APP_SUCCESS), resp_json)
return (action_result.set_status(phantom.APP_ERROR, PW_ERR_FROM_SERVER.format(status=r.status_code, detail=details)), resp_json)
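# Illustrative caller pattern (sketch only, not part of the upstream connector): action handlers
# consume the (status, resp_json) tuple returned by _make_rest_call, for example:
#   ret_val, resp_json = self._make_rest_call('/sensors', action_result)
#   if phantom.is_fail(ret_val):
#       return action_result.get_status()
# Passing exception_error_codes=[404] lets a caller treat a 404 as "success with an empty body"
# instead of failing the action.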
def _test_connectivity(self, param):
self.save_progress("Attempting to connect to API endpoint...")
self.save_progress("Querying sensor list to validate config")
action_result = self.add_action_result(ActionResult(param))
ret_val, resp_json = self._get_sensor_list(action_result)
if (phantom.is_fail(ret_val)):
self.save_progress("Test Connectivity Failed")
return self.get_status()
self.save_progress("Test Connectivity Passed")
return self.set_status(phantom.APP_SUCCESS)
def _get_packets(self, param):
action_result = self.add_action_result(ActionResult(param))
packet_type = param[PW_JSON_TYPE]
object_id = param[PW_JSON_ID]
sensor_id = param.get(PW_JSON_SENSOR_ID)
packet_type = packet_type.lower()
if packet_type not in VALID_PW_TYPES:
return action_result.set_status(phantom.APP_ERROR, "Invalid type")
info_endpoint = '/pcaps/{0}s/{1}/info'.format(packet_type, object_id)
file_endpoint = '/pcaps/{0}s/{1}'.format(packet_type, object_id)
if (packet_type == "observation"):
if (not sensor_id):
return action_result.set_status(phantom.APP_ERROR, "{0} is required when type is observation".format(PW_JSON_SENSOR_ID))
info_endpoint = '/pcaps/{0}s/{1}/{2}/info'.format(packet_type, sensor_id, object_id)
file_endpoint = '/pcaps/{0}s/{1}/{2}'.format(packet_type, sensor_id, object_id)
ret_val, file_info = self._make_rest_call(info_endpoint, action_result, exception_error_codes=[404, 505])
if (phantom.is_fail(ret_val)):
return action_result.get_status()
if (not file_info):
return action_result.set_status(phantom.APP_SUCCESS, "File not present")
if ('not found' in file_info.get('error', {}).get('message', '').lower()):
return action_result.set_status(phantom.APP_SUCCESS, "File not present")
action_result.add_data(file_info)
# Now download the file
file_name = "{0}.pcap".format(object_id)
if hasattr(Vault, 'get_vault_tmp_dir'):
tmp = tempfile.NamedTemporaryFile(dir=Vault.get_vault_tmp_dir())
else:
tmp = tempfile.NamedTemporaryFile(dir="/vault/tmp/", delete=False)
params = {'filename': file_name}
estimated_size = file_info.get('estimatedSize', None)
ret_val = self._download_file(file_endpoint, action_result, tmp.name, params, estimated_size)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
# MOVE the file to the vault
vault_attach_dict = {}
self.debug_print("Vault file name: {0}".format(file_name))
vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = self.get_action_name()
vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = self.get_app_run_id()
vault_attach_dict['contains'] = ['pcap']
try:
success, message, vault_id = ph_rules.vault_add(self.get_container_id(), tmp.name, file_name, vault_attach_dict)
except Exception as e:
self.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(e))
return action_result.set_status(phantom.APP_ERROR, "Failed to add the file to Vault", e)
if (not success):
self.debug_print("Failed to add file to Vault: {0}".format(message))
return action_result.set_status(phantom.APP_ERROR, "Failed to add the file to Vault: {}".format(message))
action_result.set_summary({'vault_id': vault_id})
return action_result.set_status(phantom.APP_SUCCESS)
def _parse_time(self, param_name, time_str, action_result):
ret_val = None
try:
dt = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%fZ')
dt_tt = dt.timetuple()
ret_val = int(time.mktime(dt_tt)) * 1000
except Exception as e:
action_result.set_status(phantom.APP_ERROR, "Unable to parse {0} value {1}, Error: {2}".format(param_name, time_str, str(e)))
return ret_val
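# Worked example (assumes the host's local timezone is UTC): '2015-07-21T00:27:59.000Z' parses to
# a struct_time for that instant, time.mktime() converts it to 1437438479 epoch seconds, and the
# * 1000 yields 1437438479000 milliseconds. Because time.mktime() interprets the struct_time in
# local time, the result shifts by the host's UTC offset on non-UTC machines.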
def _handle_time_interval(self, param, action_result):
start_time = param.get(PW_JSON_START_TIME)
end_time = param.get(PW_JSON_END_TIME)
# handle the case where start time is not given and end time is given
if (start_time is None and end_time is not None):
return (action_result.set_status(phantom.APP_ERROR,
"Please specify start_time, it is required if end_time is specified"), None, None)
# if start time is specified, process it
if (start_time):
start_time = self._parse_time(PW_JSON_START_TIME, start_time, action_result)
if (start_time is None):
return (action_result.get_status(), None, None)
# if end time is specified, process it
if (end_time):
end_time = self._parse_time(PW_JSON_END_TIME, end_time, action_result)
if (end_time is None):
return (action_result.get_status(), None, None)
# if start time is not specified, get the default value
if (start_time is None):
# get the start time to use, i.e. current - hours in seconds
start_time = int(time.time() - (int(PW_N_DAYS_HOURS) * (60 * 60)))
# convert it to milliseconds
start_time = start_time * 1000
# if end time is not specified, get the default value
if (end_time is None):
end_time = self._time_now()
if (end_time <= start_time):
return (action_result.set_status(phantom.APP_ERROR,
"Invalid time range, end_time cannot be less than or equal to start_time"), None, None)
return (phantom.APP_SUCCESS, start_time, end_time)
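# Illustrative behaviour (summary of the checks above): with neither parameter supplied the window
# defaults to the last PW_N_DAYS_HOURS hours ending "now"; an end_time without a start_time is
# rejected; and an end_time that is not strictly later than start_time is rejected as well.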
def _hunt_file(self, param):
self.save_progress("Querying hunt file")
action_result = self.add_action_result(ActionResult(param))
file_hash = param[PW_JSON_HASH]
ret_val, start_time, end_time = self._handle_time_interval(param, action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
endpoint = '/reputations/files/{0}'.format(file_hash)
params = {
# 'details': 'threat,ip,domain,device',
'start': start_time,
'end': end_time}
ret_val, response = self._make_rest_call(endpoint, action_result, params=params, exception_error_codes=[404, 505])
if (phantom.is_fail(ret_val)):
return action_result.get_status()
action_result.add_data(response)
info = response.get('info')
summary = action_result.update_summary({})
summary['file_type'] = info.get('type')
summary['id'] = info.get('id')
summary['detected_type'] = info.get('detectedType')
summary['observation_count'] = response.get('observations', {}).get('count', 0)
self.save_progress("Querying hunt file succeeded")
return action_result.set_status(phantom.APP_SUCCESS)
def _hunt_domain(self, param):
self.save_progress("Querying hunt domain")
action_result = self.add_action_result(ActionResult(param))
domain = param[PW_JSON_DOMAIN]
ret_val, start_time, end_time = self._handle_time_interval(param, action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
endpoint = '/reputations/domains/{0}'.format(domain)
params = {
'details': 'threat,domain,device',
'include': 'netflows',
'start': start_time,
'end': end_time}
ret_val, response = self._make_rest_call(endpoint, action_result, params=params, exception_error_codes=[404, 505])
if (phantom.is_fail(ret_val)):
return action_result.get_status()
action_result.add_data(response)
domain_info = response.get('domain')
events = response.get('threat', {}).get('events')
summary = action_result.update_summary({})
if (domain_info):
summary.update({'domain_organization': domain_info.get('organization', '')})
if (events):
summary.update({'event_count': events.get('count', {}).get('total', '')})
else:
summary.update({'event_count': 0})
self.save_progress("Querying hunt domain succeeded")
return action_result.set_status(phantom.APP_SUCCESS)
def _hunt_ip(self, param):
self.save_progress("Querying hunt ip")
action_result = self.add_action_result(ActionResult(param))
ip = param[PW_JSON_IP]
ret_val, start_time, end_time = self._handle_time_interval(param, action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
endpoint = '/reputations/ips/{0}'.format(ip)
params = {
'details': 'threat,ip,device',
'include': 'netflows',
'start': start_time,
'end': end_time}
ret_val, response = self._make_rest_call(endpoint, action_result, params=params, exception_error_codes=[404, 505])
if (phantom.is_fail(ret_val)):
return action_result.get_status()
action_result.add_data(response)
ip_info = response.get('ip')
events = response.get('threat', {}).get('events')
summary = action_result.update_summary({})
if (ip_info):
summary.update({'ip_organization': ip_info.get('organization', '')})
if (events):
summary.update({'event_count': events.get('count', {}).get('total', '')})
else:
summary.update({'event_count': 0})
self.save_progress("Querying hunt ip succeeded")
return action_result.set_status(phantom.APP_SUCCESS)
def _get_first_start_time(self, action_result):
config = self.get_config()
# Get the poll hours
poll_hours = config[PW_JSON_POLL_HOURS]
if not self.is_positive_non_zero_int(poll_hours):
self.save_progress("Please provide a positive integer in 'Ingest events in last N hours'")
return action_result.set_status(phantom.APP_ERROR, "Please provide a positive integer in 'Ingest events in last N hours'"), None
# get the start time to use, i.e. current - poll hours in seconds
start_time = int(time.time() - (int(poll_hours) * (60 * 60)))
# convert it to milliseconds
start_time = start_time * 1000
return phantom.APP_SUCCESS, start_time
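# Worked example (hypothetical config value): with PW_JSON_POLL_HOURS set to 24, start_time is
# int(time.time() - 24 * 3600) seconds, i.e. exactly one day before "now", and the * 1000
# conversion produces the millisecond timestamp the ProtectWise API expects.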
def _time_now(self):
return int(time.time() * 1000)
def _get_query_params(self, param, action_result):
# function to separate on poll and poll now
config = self.get_config()
limit = config[PW_JSON_MAX_CONTAINERS]
if not self.is_positive_non_zero_int(limit):
self.save_progress("Please provide a positive integer in 'Maximum events for scheduled polling'")
return action_result.set_status(phantom.APP_ERROR,
"Please provide a positive integer in 'Maximum events for scheduled polling'"), None
query_params = dict()
last_time = self._state.get(PW_JSON_LAST_DATE_TIME)
if self.is_poll_now():
limit = param.get("container_count", 100)
ret_val, query_params["start"] = self._get_first_start_time(action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status(), None
elif (self._state.get('first_run', True)):
self._state['first_run'] = False
limit = config.get("first_run_max_events", 100)
if not self.is_positive_non_zero_int(limit):
self.save_progress("Please provide a positive integer in 'Maximum events to poll first time'")
return action_result.set_status(phantom.APP_ERROR,
"Please provide a positive integer in 'Maximum events to poll first time'"), None
ret_val, query_params["start"] = self._get_first_start_time(action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status(), None
elif (last_time):
query_params["start"] = last_time
else:
ret_val, query_params["start"] = self._get_first_start_time(action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status(), None
query_params["maxLimit"] = limit
query_params["minLimit"] = limit
query_params["end"] = self._time_now()
if (not self.is_poll_now()):
self._state[PW_JSON_LAST_DATE_TIME] = query_params["end"]
return phantom.APP_SUCCESS, query_params
def _get_artifact_name(self, observation):
default_name = "Observation Artifact"
try:
return observation['data']['idsEvent']['description']
except:
pass
try:
return '{0} Observation from {1}'.format(observation['killChainStage'], observation['source'])
except:
pass
try:
return 'Observation from {0}'.format(observation['source'])
except:
pass
return default_name
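# Illustrative precedence (hypothetical observation values): an observation whose
# data.idsEvent.description is set uses that description verbatim; otherwise a name such as
# 'Delivery Observation from Ids' is built from killChainStage and source; a source alone yields
# 'Observation from Ids'; and if none of those keys exist the default name is returned.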
def _download_file(self, url_to_download, action_result, local_file_path, params, estimated_size=None):
"""Function that downloads the file from a url
Args:
url_to_download: the url of the file to download
action_result: The ActionResult object to hold the status
local_file_path: The local file path that was created.
Return:
A status code of the type phantom.APP_[SUCC|ERR]_XXX.
The size in bytes of the file downloaded.
"""
content_size = 0
# For big files, download in chunks of this percentage of the total size
percent_block = 10
# Size (in bytes) above which a file is considered big.
# A big file is downloaded in chunks of percent_block percent; smaller files are read in a single chunk
big_file_size_bytes = 20 * (1024 * 1024)
self.save_progress("Downloading file from {0} to {1}".format(url_to_download, local_file_path))
self.debug_print("Complete URL", url_to_download)
try:
r = requests.get(self._base_url + url_to_download, headers=self._headers, params=params, stream=True,
timeout=PROTECTWISE_DEFAULT_TIMEOUT)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, "Error downloading file", e)
if (r.status_code != requests.codes.ok): # pylint: disable=E1101
return action_result.set_status(phantom.APP_ERROR, "Server returned status_code: {0}".format(r.status_code))
# get the content length
content_size = r.headers.get('content-length')
if (not content_size and estimated_size is not None):
content_size = estimated_size
if (not content_size):
return action_result.set_status(phantom.APP_ERROR, "Unable to get content length")
self.save_progress(phantom.APP_PROG_FILE_SIZE, value=content_size, type='bytes')
bytes_to_download = int(content_size)
# init to download the whole file in a single read
block_size = bytes_to_download
# if the file is big then download in % increments
if (bytes_to_download > big_file_size_bytes):
block_size = (bytes_to_download * percent_block) // 100  # integer chunk size, as expected by iter_content
bytes_downloaded = 0
try:
with open(local_file_path, 'wb') as file_handle:
for chunk in r.iter_content(chunk_size=block_size):
if (chunk):
bytes_downloaded += len(chunk)
file_handle.write(chunk)
file_handle.flush()
os.fsync(file_handle.fileno())
self.send_progress(PW_PROG_FINISHED_DOWNLOADING_STATUS, float(bytes_downloaded) / float(bytes_to_download))
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, "Error downloading file", e)
return phantom.APP_SUCCESS
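# Worked example (hypothetical sizes): a 100 MB pcap exceeds big_file_size_bytes (20 MB), so
# block_size becomes 100 MB * 10 // 100 = 10 MB and the response body is streamed in ten chunks,
# with send_progress reporting the downloaded fraction after each one; a 5 MB pcap is read in a
# single chunk because block_size is initialised to the full content length.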
def _create_artifacts_for_event(self, event, action_result, container_index):
artifacts = []
observation_count = event.get('observationCount')
if (not observation_count):
return artifacts
event_id = event['id']
# we need to get the details of the event
ret_val, resp_json = self._make_rest_call('/events/{0}'.format(event_id), action_result)
if (phantom.is_fail(ret_val)):
return self.set_status(phantom.APP_ERROR, "Failed to get events: {0}".format(action_result.get_message()))
observations = resp_json.get('observations')
if (not observations):
return artifacts
for i, observation in enumerate(observations):
self.send_progress("Processing Container # {0} Artifact # {1}".format(container_index, i))
artifact = dict()
artifact['data'] = observation
artifact['source_data_identifier'] = observation['id']
artifact['name'] = self._get_artifact_name(observation)
connection_info = observation.get('connectionInfo')
artifact['cef'] = cef = dict()
artifact['cef_types'] = PW_CEF_CONTAINS
hashes = {}
try:
hashes = observation['data']['fileReputation']['hashes']
except:
pass
if (hashes):
# add the md5 in the hash key, everything else in its own key
# this is to keep things happy in 2.0 and 2.1
if ('md5' in hashes):
cef['fileHash'] = hashes['md5']
cef['fileHashMd5'] = hashes['md5']
if ('sha256' in hashes):
cef['fileHashSha256'] = hashes['sha256']
if ('sha512' in hashes):
cef['fileHashSha512'] = hashes['sha512']
if ('sha1' in hashes):
cef['fileHashSha1'] = hashes['sha1']
if (connection_info):
cef['sourceAddress'] = connection_info.get('srcIp')
cef['destinationAddress'] = connection_info.get('dstIp')
cef['sourcePort'] = connection_info.get('srcPort')
cef['destinationPort'] = connection_info.get('dstPort')
cef['sourceMacAddress'] = connection_info.get('srcMac')
cef['destinationMacAddress'] = connection_info.get('dstMac')
cef['transportProtocol'] = connection_info.get('layer4Proto')
cef['observationId'] = observation['id']
cef['sensorId'] = observation['sensorId']
if self._display_dup_artifacts is True:
cef['receiptTime'] = self._get_str_from_epoch(int(round(time.time() * 1000)))
artifacts.append(artifact)
return artifacts
def _get_str_from_epoch(self, epoch_milli):
# 2015-07-21T00:27:59Z
return datetime.fromtimestamp(int(epoch_milli) / 1000.0).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
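# Worked example (assumes a UTC host): 1437438479000 ms -> '2015-07-21T00:27:59.000000Z'. Note
# that datetime.fromtimestamp() uses local time, so on non-UTC hosts the rendered wall-clock
# time shifts even though the literal 'Z' suffix is always appended.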
def _save_results(self, results):
containers_processed = 0
for i, result in enumerate(results):
# result is a dictionary of a single container and artifacts
if ('container' not in result):
continue
if ('artifacts' not in result):
# ignore containers without artifacts
continue
if (len(result['artifacts']) == 0):
# ignore containers without artifacts
continue
containers_processed += 1
self.send_progress("Adding Container # {0}".format(i))
ret_val, response, container_id = self.save_container(result['container'])
self.debug_print("save_container returns, value: {0}, reason: {1}, id: {2}".format(ret_val, response, container_id))
if (phantom.is_fail(ret_val)):
continue
if (not container_id):
continue
if ('artifacts' not in result):
continue
artifacts = result['artifacts']
# get the number of artifacts; the list may or may not have been trimmed
len_artifacts = len(artifacts)
for j, artifact in enumerate(artifacts):
# if it is the last artifact of the last container
if ((j + 1) == len_artifacts):
# mark it such that active playbooks get executed
artifact['run_automation'] = True
artifact['container_id'] = container_id
self.send_progress("Adding Container # {0}, Artifact # {1}".format(i, j))
ret_val, status_string, artifact_id = self.save_artifact(artifact)
self.debug_print("save_artifact returns, value: {0}, reason: {1}, id: {2}".format(ret_val, status_string, artifact_id))
return containers_processed
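# Illustrative flow (summary, not original code): each result dict carries one container plus its
# artifacts; containers with no artifacts are skipped entirely, save_container() supplies the
# container_id, and only the last artifact of each container sets run_automation=True so active
# playbooks are triggered once per container rather than once per artifact.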
def _on_poll(self, param):
action_result = ActionResult(param)
# Get the requests based on the type of poll
ret_val, query_params = self._get_query_params(param, action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
ret_val, resp_json = self._make_rest_call('/events', action_result, params=query_params)
if (phantom.is_fail(ret_val)):
return self.set_status(phantom.APP_ERROR, "Failed to get events: {0}".format(action_result.get_message()))
self.save_progress("Total events: {0}".format(resp_json.get('count', 'NA')))
events = resp_json.get('events', [])
no_of_events = len(events)
self.save_progress("Processing {0} events".format(no_of_events))
results = []
for i, event in enumerate(events):
self.send_progress("Processing Container # {0}".format(i))
container = dict()
container['data'] = event
container['source_data_identifier'] = event['id']
if self._display_dup_containers is True:
container['source_data_identifier'] = "{} container_created:{}".format(container['source_data_identifier'],
self._get_str_from_epoch(int(round(time.time() * 1000))))
container['name'] = event['message']
container['start_time'] = self._get_str_from_epoch(event['startedAt'])
container['end_time'] = self._get_str_from_epoch(event['endedAt'])
container['id'] = event['id']
tags = event.get('tags')
if (tags is not None):
container['tags'] = tags.split(',')
artifacts = self._create_artifacts_for_event(event, action_result, i)
results.append({'container': container, 'artifacts': artifacts})
self.send_progress("Done Processing")
self._save_results(results)
# store the date time of the last event
if ((no_of_events) and (not self.is_poll_now())):
config = self.get_config()
last_date_time = events[0]["startedAt"]
self._state[PW_JSON_LAST_DATE_TIME] = last_date_time
date_strings = [x["startedAt"] for x in events]
date_strings = set(date_strings)
if (len(date_strings) == 1):
self.debug_print("Getting all containers with the same date, down to the millisecond."
" That means the device is generating"
" max_containers=({0}) per second. Skipping to the next second to not get stuck.".format(config[PW_JSON_MAX_CONTAINERS]))
self._state[PW_JSON_LAST_DATE_TIME] = int(self._state[PW_JSON_LAST_DATE_TIME]) + 1
return self.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
action = self.get_action_identifier()
if (action == phantom.ACTION_ID_INGEST_ON_POLL):
start_time = time.time()
result = self._on_poll(param)
end_time = time.time()
diff_time = end_time - start_time
human_time = str(timedelta(seconds=int(diff_time)))
self.save_progress("Time taken: {0}".format(human_time))
elif (action == ACTION_ID_TEST_ASSET_CONNECTIVITY):
result = self._test_connectivity(param)
elif (action == ACTION_ID_GET_PACKETS):
result = self._get_packets(param)
elif (action == ACTION_ID_HUNT_IP):
result = self._hunt_ip(param)
elif (action == ACTION_ID_HUNT_DOMAIN):
result = self._hunt_domain(param)
elif (action == ACTION_ID_HUNT_FILE):
result = self._hunt_file(param)
return result
if __name__ == '__main__':
import argparse
import sys
import pudb
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
verify = args.verify
if (username is not None and password is None):
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if (username and password):
login_url = BaseConnector._get_phantom_base_url() + "login"
try:
print("Accessing the Login page")
r = requests.get(login_url, verify=verify, timeout=PROTECTWISE_DEFAULT_TIMEOUT)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken=' + csrftoken
headers['Referer'] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=verify, data=data, headers=headers, timeout=PROTECTWISE_DEFAULT_TIMEOUT)
session_id = r2.cookies['sessionid']
except Exception as e:
print("Unable to get session id from the platfrom. Error: " + str(e))
sys.exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = ProtectWiseConnector()
connector.print_progress_message = True
if (session_id is not None):
in_json['user_session_token'] = session_id
connector._set_csrf_info(csrftoken, headers['Referer'])
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
sys.exit(0)
| 37.923165
| 143
| 0.628958
|
429503bd083c0b237ec9f247ac58c45386c3d048
| 37,252
|
py
|
Python
|
gcloudoem/datastore/_generated/datastore_pb2.py
|
Kapiche/gcloud-datastore-oem
|
826acf328359b463a9f1e8761442b3a2d549c13e
|
[
"Apache-2.0"
] | 1
|
2015-07-08T17:55:18.000Z
|
2015-07-08T17:55:18.000Z
|
gcloudoem/datastore/_generated/datastore_pb2.py
|
Kapiche/gcloud-datastore-oem
|
826acf328359b463a9f1e8761442b3a2d549c13e
|
[
"Apache-2.0"
] | 8
|
2015-04-30T05:35:50.000Z
|
2015-12-14T11:22:01.000Z
|
gcloudoem/datastore/_generated/datastore_pb2.py
|
Kapiche/gcloud-datastore-oem
|
826acf328359b463a9f1e8761442b3a2d549c13e
|
[
"Apache-2.0"
] | 2
|
2019-01-05T13:33:53.000Z
|
2019-01-06T08:08:51.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/datastore/v1/datastore.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from . import entity_pb2 as google_dot_datastore_dot_v1_dot_entity__pb2
from . import query_pb2 as google_dot_datastore_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/datastore/v1/datastore.proto',
package='google.datastore.v1',
syntax='proto3',
serialized_pb=_b('\n#google/datastore/v1/datastore.proto\x12\x13google.datastore.v1\x1a\x1cgoogle/api/annotations.proto\x1a google/datastore/v1/entity.proto\x1a\x1fgoogle/datastore/v1/query.proto\"\x83\x01\n\rLookupRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x36\n\x0cread_options\x18\x01 \x01(\x0b\x32 .google.datastore.v1.ReadOptions\x12&\n\x04keys\x18\x03 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\xa2\x01\n\x0eLookupResponse\x12\x30\n\x05\x66ound\x18\x01 \x03(\x0b\x32!.google.datastore.v1.EntityResult\x12\x32\n\x07missing\x18\x02 \x03(\x0b\x32!.google.datastore.v1.EntityResult\x12*\n\x08\x64\x65\x66\x65rred\x18\x03 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\x84\x02\n\x0fRunQueryRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x36\n\x0cpartition_id\x18\x02 \x01(\x0b\x32 .google.datastore.v1.PartitionId\x12\x36\n\x0cread_options\x18\x01 \x01(\x0b\x32 .google.datastore.v1.ReadOptions\x12+\n\x05query\x18\x03 \x01(\x0b\x32\x1a.google.datastore.v1.QueryH\x00\x12\x32\n\tgql_query\x18\x07 \x01(\x0b\x32\x1d.google.datastore.v1.GqlQueryH\x00\x42\x0c\n\nquery_type\"s\n\x10RunQueryResponse\x12\x34\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32%.google.datastore.v1.QueryResultBatch\x12)\n\x05query\x18\x02 \x01(\x0b\x32\x1a.google.datastore.v1.Query\"-\n\x17\x42\x65ginTransactionRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\"/\n\x18\x42\x65ginTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\":\n\x0fRollbackRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"\x12\n\x10RollbackResponse\"\x83\x02\n\rCommitRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x35\n\x04mode\x18\x05 \x01(\x0e\x32\'.google.datastore.v1.CommitRequest.Mode\x12\x15\n\x0btransaction\x18\x01 \x01(\x0cH\x00\x12\x30\n\tmutations\x18\x06 \x03(\x0b\x32\x1d.google.datastore.v1.Mutation\"F\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x11\n\rTRANSACTIONAL\x10\x01\x12\x15\n\x11NON_TRANSACTIONAL\x10\x02\x42\x16\n\x14transaction_selector\"f\n\x0e\x43ommitResponse\x12=\n\x10mutation_results\x18\x03 \x03(\x0b\x32#.google.datastore.v1.MutationResult\x12\x15\n\rindex_updates\x18\x04 \x01(\x05\"P\n\x12\x41llocateIdsRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12&\n\x04keys\x18\x01 \x03(\x0b\x32\x18.google.datastore.v1.Key\"=\n\x13\x41llocateIdsResponse\x12&\n\x04keys\x18\x01 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\x87\x02\n\x08Mutation\x12-\n\x06insert\x18\x04 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12-\n\x06update\x18\x05 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12-\n\x06upsert\x18\x06 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12*\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x18.google.datastore.v1.KeyH\x00\x12\x16\n\x0c\x62\x61se_version\x18\x08 \x01(\x03H\x01\x42\x0b\n\toperationB\x1d\n\x1b\x63onflict_detection_strategy\"c\n\x0eMutationResult\x12%\n\x03key\x18\x03 \x01(\x0b\x32\x18.google.datastore.v1.Key\x12\x0f\n\x07version\x18\x04 \x01(\x03\x12\x19\n\x11\x63onflict_detected\x18\x05 \x01(\x08\"\xd5\x01\n\x0bReadOptions\x12L\n\x10read_consistency\x18\x01 \x01(\x0e\x32\x30.google.datastore.v1.ReadOptions.ReadConsistencyH\x00\x12\x15\n\x0btransaction\x18\x02 \x01(\x0cH\x00\"M\n\x0fReadConsistency\x12 \n\x1cREAD_CONSISTENCY_UNSPECIFIED\x10\x00\x12\n\n\x06STRONG\x10\x01\x12\x0c\n\x08\x45VENTUAL\x10\x02\x42\x12\n\x10\x63onsistency_type2\xdb\x06\n\tDatastore\x12~\n\x06Lookup\x12\".google.datastore.v1.LookupRequest\x1a#.google.datastore.v1.LookupResponse\"+\x82\xd3\xe4\x93\x02%\" /v1/projects/{project_id}:lookup:\x01*\x12\x86\x01\n\x08RunQuery\x12$.google.datastore.v1.RunQueryRequest\x1a%.google.datastore.v1.RunQueryResponse\"-\x82\xd3\xe4\x93\x02\'\"\"/v1/projects/{project_id}:runQuery:\x01*\x12\xa6\x01\n\x10\x42\x65ginTransaction\x12,.google.datastore.v1.BeginTransactionRequest\x1a-.google.datastore.v1.BeginTransactionResponse\"5\x82\xd3\xe4\x93\x02/\"*/v1/projects/{project_id}:beginTransaction:\x01*\x12~\n\x06\x43ommit\x12\".google.datastore.v1.CommitRequest\x1a#.google.datastore.v1.CommitResponse\"+\x82\xd3\xe4\x93\x02%\" /v1/projects/{project_id}:commit:\x01*\x12\x86\x01\n\x08Rollback\x12$.google.datastore.v1.RollbackRequest\x1a%.google.datastore.v1.RollbackResponse\"-\x82\xd3\xe4\x93\x02\'\"\"/v1/projects/{project_id}:rollback:\x01*\x12\x92\x01\n\x0b\x41llocateIds\x12\'.google.datastore.v1.AllocateIdsRequest\x1a(.google.datastore.v1.AllocateIdsResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1/projects/{project_id}:allocateIds:\x01*B+\n\x17\x63om.google.datastore.v1B\x0e\x44\x61tastoreProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_datastore_dot_v1_dot_entity__pb2.DESCRIPTOR,google_dot_datastore_dot_v1_dot_query__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COMMITREQUEST_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='google.datastore.v1.CommitRequest.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MODE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRANSACTIONAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NON_TRANSACTIONAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1178,
serialized_end=1248,
)
_sym_db.RegisterEnumDescriptor(_COMMITREQUEST_MODE)
_READOPTIONS_READCONSISTENCY = _descriptor.EnumDescriptor(
name='ReadConsistency',
full_name='google.datastore.v1.ReadOptions.ReadConsistency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READ_CONSISTENCY_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRONG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVENTUAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2007,
serialized_end=2084,
)
_sym_db.RegisterEnumDescriptor(_READOPTIONS_READCONSISTENCY)
_LOOKUPREQUEST = _descriptor.Descriptor(
name='LookupRequest',
full_name='google.datastore.v1.LookupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.LookupRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1.LookupRequest.read_options', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.LookupRequest.keys', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=158,
serialized_end=289,
)
_LOOKUPRESPONSE = _descriptor.Descriptor(
name='LookupResponse',
full_name='google.datastore.v1.LookupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='found', full_name='google.datastore.v1.LookupResponse.found', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='missing', full_name='google.datastore.v1.LookupResponse.missing', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred', full_name='google.datastore.v1.LookupResponse.deferred', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=292,
serialized_end=454,
)
_RUNQUERYREQUEST = _descriptor.Descriptor(
name='RunQueryRequest',
full_name='google.datastore.v1.RunQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.RunQueryRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partition_id', full_name='google.datastore.v1.RunQueryRequest.partition_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1.RunQueryRequest.read_options', index=2,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1.RunQueryRequest.query', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gql_query', full_name='google.datastore.v1.RunQueryRequest.gql_query', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='query_type', full_name='google.datastore.v1.RunQueryRequest.query_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=457,
serialized_end=717,
)
_RUNQUERYRESPONSE = _descriptor.Descriptor(
name='RunQueryResponse',
full_name='google.datastore.v1.RunQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch', full_name='google.datastore.v1.RunQueryResponse.batch', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1.RunQueryResponse.query', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=719,
serialized_end=834,
)
_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor(
name='BeginTransactionRequest',
full_name='google.datastore.v1.BeginTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.BeginTransactionRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=836,
serialized_end=881,
)
_BEGINTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='BeginTransactionResponse',
full_name='google.datastore.v1.BeginTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.BeginTransactionResponse.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=883,
serialized_end=930,
)
_ROLLBACKREQUEST = _descriptor.Descriptor(
name='RollbackRequest',
full_name='google.datastore.v1.RollbackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.RollbackRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.RollbackRequest.transaction', index=1,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=932,
serialized_end=990,
)
_ROLLBACKRESPONSE = _descriptor.Descriptor(
name='RollbackResponse',
full_name='google.datastore.v1.RollbackResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=992,
serialized_end=1010,
)
_COMMITREQUEST = _descriptor.Descriptor(
name='CommitRequest',
full_name='google.datastore.v1.CommitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.CommitRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mode', full_name='google.datastore.v1.CommitRequest.mode', index=1,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.CommitRequest.transaction', index=2,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.datastore.v1.CommitRequest.mutations', index=3,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMMITREQUEST_MODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='transaction_selector', full_name='google.datastore.v1.CommitRequest.transaction_selector',
index=0, containing_type=None, fields=[]),
],
serialized_start=1013,
serialized_end=1272,
)
_COMMITRESPONSE = _descriptor.Descriptor(
name='CommitResponse',
full_name='google.datastore.v1.CommitResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mutation_results', full_name='google.datastore.v1.CommitResponse.mutation_results', index=0,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='index_updates', full_name='google.datastore.v1.CommitResponse.index_updates', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1274,
serialized_end=1376,
)
_ALLOCATEIDSREQUEST = _descriptor.Descriptor(
name='AllocateIdsRequest',
full_name='google.datastore.v1.AllocateIdsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.AllocateIdsRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.AllocateIdsRequest.keys', index=1,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1378,
serialized_end=1458,
)
_ALLOCATEIDSRESPONSE = _descriptor.Descriptor(
name='AllocateIdsResponse',
full_name='google.datastore.v1.AllocateIdsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.AllocateIdsResponse.keys', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1460,
serialized_end=1521,
)
_MUTATION = _descriptor.Descriptor(
name='Mutation',
full_name='google.datastore.v1.Mutation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='insert', full_name='google.datastore.v1.Mutation.insert', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update', full_name='google.datastore.v1.Mutation.update', index=1,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upsert', full_name='google.datastore.v1.Mutation.upsert', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delete', full_name='google.datastore.v1.Mutation.delete', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='base_version', full_name='google.datastore.v1.Mutation.base_version', index=4,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.datastore.v1.Mutation.operation',
index=0, containing_type=None, fields=[]),
_descriptor.OneofDescriptor(
name='conflict_detection_strategy', full_name='google.datastore.v1.Mutation.conflict_detection_strategy',
index=1, containing_type=None, fields=[]),
],
serialized_start=1524,
serialized_end=1787,
)
_MUTATIONRESULT = _descriptor.Descriptor(
name='MutationResult',
full_name='google.datastore.v1.MutationResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.datastore.v1.MutationResult.key', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='google.datastore.v1.MutationResult.version', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conflict_detected', full_name='google.datastore.v1.MutationResult.conflict_detected', index=2,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1789,
serialized_end=1888,
)
_READOPTIONS = _descriptor.Descriptor(
name='ReadOptions',
full_name='google.datastore.v1.ReadOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='read_consistency', full_name='google.datastore.v1.ReadOptions.read_consistency', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.ReadOptions.transaction', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_READOPTIONS_READCONSISTENCY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='consistency_type', full_name='google.datastore.v1.ReadOptions.consistency_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=1891,
serialized_end=2104,
)
_LOOKUPREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_LOOKUPREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_LOOKUPRESPONSE.fields_by_name['found'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['missing'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['deferred'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_RUNQUERYREQUEST.fields_by_name['partition_id'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._PARTITIONID
_RUNQUERYREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_RUNQUERYREQUEST.fields_by_name['query'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._QUERY
_RUNQUERYREQUEST.fields_by_name['gql_query'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._GQLQUERY
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['query'])
_RUNQUERYREQUEST.fields_by_name['query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['gql_query'])
_RUNQUERYREQUEST.fields_by_name['gql_query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYRESPONSE.fields_by_name['batch'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._QUERYRESULTBATCH
_RUNQUERYRESPONSE.fields_by_name['query'].message_type = google_dot_datastore_dot_v1_dot_query__pb2._QUERY
_COMMITREQUEST.fields_by_name['mode'].enum_type = _COMMITREQUEST_MODE
_COMMITREQUEST.fields_by_name['mutations'].message_type = _MUTATION
_COMMITREQUEST_MODE.containing_type = _COMMITREQUEST
_COMMITREQUEST.oneofs_by_name['transaction_selector'].fields.append(
_COMMITREQUEST.fields_by_name['transaction'])
_COMMITREQUEST.fields_by_name['transaction'].containing_oneof = _COMMITREQUEST.oneofs_by_name['transaction_selector']
_COMMITRESPONSE.fields_by_name['mutation_results'].message_type = _MUTATIONRESULT
_ALLOCATEIDSREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_ALLOCATEIDSRESPONSE.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_MUTATION.fields_by_name['insert'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['update'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['upsert'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['delete'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['insert'])
_MUTATION.fields_by_name['insert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['update'])
_MUTATION.fields_by_name['update'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['upsert'])
_MUTATION.fields_by_name['upsert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['delete'])
_MUTATION.fields_by_name['delete'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['conflict_detection_strategy'].fields.append(
_MUTATION.fields_by_name['base_version'])
_MUTATION.fields_by_name['base_version'].containing_oneof = _MUTATION.oneofs_by_name['conflict_detection_strategy']
_MUTATIONRESULT.fields_by_name['key'].message_type = google_dot_datastore_dot_v1_dot_entity__pb2._KEY
_READOPTIONS.fields_by_name['read_consistency'].enum_type = _READOPTIONS_READCONSISTENCY
_READOPTIONS_READCONSISTENCY.containing_type = _READOPTIONS
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['read_consistency'])
_READOPTIONS.fields_by_name['read_consistency'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['transaction'])
_READOPTIONS.fields_by_name['transaction'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
DESCRIPTOR.message_types_by_name['LookupRequest'] = _LOOKUPREQUEST
DESCRIPTOR.message_types_by_name['LookupResponse'] = _LOOKUPRESPONSE
DESCRIPTOR.message_types_by_name['RunQueryRequest'] = _RUNQUERYREQUEST
DESCRIPTOR.message_types_by_name['RunQueryResponse'] = _RUNQUERYRESPONSE
DESCRIPTOR.message_types_by_name['BeginTransactionRequest'] = _BEGINTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['BeginTransactionResponse'] = _BEGINTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['RollbackRequest'] = _ROLLBACKREQUEST
DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE
DESCRIPTOR.message_types_by_name['CommitRequest'] = _COMMITREQUEST
DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name['AllocateIdsRequest'] = _ALLOCATEIDSREQUEST
DESCRIPTOR.message_types_by_name['AllocateIdsResponse'] = _ALLOCATEIDSRESPONSE
DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION
DESCRIPTOR.message_types_by_name['MutationResult'] = _MUTATIONRESULT
DESCRIPTOR.message_types_by_name['ReadOptions'] = _READOPTIONS
LookupRequest = _reflection.GeneratedProtocolMessageType('LookupRequest', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.LookupRequest)
))
_sym_db.RegisterMessage(LookupRequest)
LookupResponse = _reflection.GeneratedProtocolMessageType('LookupResponse', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.LookupResponse)
))
_sym_db.RegisterMessage(LookupResponse)
RunQueryRequest = _reflection.GeneratedProtocolMessageType('RunQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RunQueryRequest)
))
_sym_db.RegisterMessage(RunQueryRequest)
RunQueryResponse = _reflection.GeneratedProtocolMessageType('RunQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RunQueryResponse)
))
_sym_db.RegisterMessage(RunQueryResponse)
BeginTransactionRequest = _reflection.GeneratedProtocolMessageType('BeginTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.BeginTransactionRequest)
))
_sym_db.RegisterMessage(BeginTransactionRequest)
BeginTransactionResponse = _reflection.GeneratedProtocolMessageType('BeginTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.BeginTransactionResponse)
))
_sym_db.RegisterMessage(BeginTransactionResponse)
RollbackRequest = _reflection.GeneratedProtocolMessageType('RollbackRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RollbackRequest)
))
_sym_db.RegisterMessage(RollbackRequest)
RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RollbackResponse)
))
_sym_db.RegisterMessage(RollbackResponse)
CommitRequest = _reflection.GeneratedProtocolMessageType('CommitRequest', (_message.Message,), dict(
DESCRIPTOR = _COMMITREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.CommitRequest)
))
_sym_db.RegisterMessage(CommitRequest)
CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMITRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.CommitResponse)
))
_sym_db.RegisterMessage(CommitResponse)
AllocateIdsRequest = _reflection.GeneratedProtocolMessageType('AllocateIdsRequest', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSREQUEST,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.AllocateIdsRequest)
))
_sym_db.RegisterMessage(AllocateIdsRequest)
AllocateIdsResponse = _reflection.GeneratedProtocolMessageType('AllocateIdsResponse', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSRESPONSE,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.AllocateIdsResponse)
))
_sym_db.RegisterMessage(AllocateIdsResponse)
Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict(
DESCRIPTOR = _MUTATION,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.Mutation)
))
_sym_db.RegisterMessage(Mutation)
MutationResult = _reflection.GeneratedProtocolMessageType('MutationResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATIONRESULT,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.MutationResult)
))
_sym_db.RegisterMessage(MutationResult)
ReadOptions = _reflection.GeneratedProtocolMessageType('ReadOptions', (_message.Message,), dict(
DESCRIPTOR = _READOPTIONS,
__module__ = 'google.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.ReadOptions)
))
_sym_db.RegisterMessage(ReadOptions)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\027com.google.datastore.v1B\016DatastoreProtoP\001'))
# @@protoc_insertion_point(module_scope)
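The registration block above attaches ReadOptions to LookupRequest and RunQueryRequest and wires its read_consistency/transaction fields into the consistency_type oneof. A minimal usage sketch of the generated classes follows; the import path, the EVENTUAL enum value and the repeated nature of keys are assumptions about the google.datastore.v1 proto that are not visible in this excerpt.
from google.datastore.v1 import datastore_pb2

request = datastore_pb2.LookupRequest()
# read_consistency and transaction share the consistency_type oneof, so
# assigning one of them clears the other.
request.read_options.read_consistency = datastore_pb2.ReadOptions.EVENTUAL
request.keys.add()  # assumed repeated google.datastore.v1.Key entries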
| 41.762332
| 4,517
| 0.763315
|
9d5b506a97e4adbda7f90767b101a9c549d3aaf0
| 1,782
|
py
|
Python
|
qingstor/qsctl/commands/rb.py
|
frostyplanet/qsctl
|
6f151638e802d6059db2e9b3fb6a519e92804e4f
|
[
"Apache-2.0"
] | null | null | null |
qingstor/qsctl/commands/rb.py
|
frostyplanet/qsctl
|
6f151638e802d6059db2e9b3fb6a519e92804e4f
|
[
"Apache-2.0"
] | null | null | null |
qingstor/qsctl/commands/rb.py
|
frostyplanet/qsctl
|
6f151638e802d6059db2e9b3fb6a519e92804e4f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# =========================================================================
# Copyright (C) 2016 Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import unicode_literals
import sys
from .base import BaseCommand
from ..constants import HTTP_OK_NO_CONTENT
class RbCommand(BaseCommand):
command = "rb"
usage = "%(prog)s <bucket> [-c <conf_file> --force]"
@classmethod
def add_extra_arguments(cls, parser):
parser.add_argument("bucket", help="Name of the bucket to be deleted")
parser.add_argument(
"--force",
action="store_true",
dest="force",
help="Forcely delete a nonempty bucket"
)
return parser
@classmethod
def send_request(cls):
        if cls.options.force:
cls.remove_multiple_keys(cls.options.bucket)
resp = cls.current_bucket.delete()
if resp.status_code != HTTP_OK_NO_CONTENT:
cls.uni_print(resp.content)
else:
cls.uni_print("Bucket <%s> deleted" % cls.options.bucket)
| 33.622642
| 78
| 0.587542
|
4d57d00d91d8be422770e5a6444277d069836f3d
| 1,405
|
py
|
Python
|
setup.py
|
kells4real/date_literal
|
55960780a30e63cf837263e11a7ab9226f7aadc0
|
[
"MIT"
] | 2
|
2021-08-06T10:56:32.000Z
|
2022-03-01T11:28:13.000Z
|
setup.py
|
kells4real/date_literal
|
55960780a30e63cf837263e11a7ab9226f7aadc0
|
[
"MIT"
] | null | null | null |
setup.py
|
kells4real/date_literal
|
55960780a30e63cf837263e11a7ab9226f7aadc0
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
setup_requires=['wheel'],
name='date_time_literal',
packages=['date_time_literal'],
version='1.0.6',
long_description=long_description,
long_description_content_type='text/markdown',
description='date-time-literal is '
'a python module that helps convert date-time or'
' date to literal days, hours, seconds, or even minutes. Compare two DateTime or Date objects, by'
' converting the objects to literal days, hours, minutes, or even seconds if you want to be precise. ',
author='Kelvin Sajere',
author_email='kells4real@gmail.com',
url='https://github.com/kells4real/date_literal',
download_url='https://github.com/kells4real/date_literal/archive/refs/tags/1.0.0.tar.gz',
keywords=['date-time', 'date', 'literal', 'date converter', 'literal date', 'date-time converter',
'django', 'python', 'module', 'python package', 'date-time literal', 'convert time', 'convert date-time'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.5',
)
| 45.322581
| 120
| 0.669039
|
6ac225cb58504719de2d29286ccd7f58efc11302
| 1,934
|
py
|
Python
|
lagom/envs/vec_env/dummy_vec_env.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/envs/vec_env/dummy_vec_env.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/envs/vec_env/dummy_vec_env.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import OrderedDict
from lagom.envs.spaces import Box, Dict
from .base import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, list_make_env):
self.envs = [make_env() for make_env in list_make_env]
observation_space = self.envs[0].observation_space
action_space = self.envs[0].action_space
super().__init__(len(list_make_env), observation_space, action_space)
assert isinstance(self.observation_space, (Box, Dict)) # enforce observation space either Box or Dict
def step_async(self, actions):
self.actions = actions
def step_wait(self):
outputs = []
for env, action in zip(self.envs, self.actions):
observation, reward, done, info = env.step(action)
if done:
observation = env.reset()
outputs.append([observation, reward, done, info])
observations, rewards, dones, infos = zip(*outputs)
observations = self._process_observations(observations)
rewards = np.array(rewards)
dones = np.array(dones)
return observations, rewards, dones, infos
def reset(self):
observations = [env.reset() for env in self.envs]
return self._process_observations(observations)
def close(self):
return
def _process_observations(self, observations):
if isinstance(self.observation_space, Box):
return np.stack(observations)
elif isinstance(self.observation_space, Dict):
spaces = self.observation_space.spaces
outputs = []
for key in spaces.keys():
outputs.append((key, np.stack([observation[key] for observation in observations], axis=0)))
return OrderedDict(outputs)
else:
raise TypeError('Only Box and Dict are supported. ')
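A short usage sketch of the class above. MyEnv is a hypothetical environment; the sketch assumes each wrapped env exposes observation_space/action_space with a sample() method, and imports the class by its module path.
from lagom.envs.vec_env.dummy_vec_env import DummyVecEnv

def make_env():
    return MyEnv()  # hypothetical environment with lagom Box/Dict spaces

venv = DummyVecEnv([make_env for _ in range(4)])
obs = venv.reset()                                    # stacked observations (Box case)
venv.step_async([env.action_space.sample() for env in venv.envs])
obs, rewards, dones, infos = venv.step_wait()         # finished envs are reset in place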
| 35.814815
| 110
| 0.628232
|
c41b6ba7516caf378753ca85046c066ce5dee6b6
| 158
|
py
|
Python
|
zhengfang/bdu/model.py
|
Ethan-Xie/python_study
|
0e251709de37d38e3ea9af2202d8f94766d3a64f
|
[
"MIT"
] | null | null | null |
zhengfang/bdu/model.py
|
Ethan-Xie/python_study
|
0e251709de37d38e3ea9af2202d8f94766d3a64f
|
[
"MIT"
] | null | null | null |
zhengfang/bdu/model.py
|
Ethan-Xie/python_study
|
0e251709de37d38e3ea9af2202d8f94766d3a64f
|
[
"MIT"
] | null | null | null |
# author: HuYong
# coding=utf-8
class BDU_Student:
    def __init__(self, number, password):
self.number = number
self.password = password
| 19.75
| 43
| 0.651899
|
85fc275976970e367cfa077f58c2298a00417d58
| 6,856
|
py
|
Python
|
src/stk/serialization/json/serializers/constructed_molecule/constructed_molecule.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
src/stk/serialization/json/serializers/constructed_molecule/constructed_molecule.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
src/stk/serialization/json/serializers/constructed_molecule/constructed_molecule.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
"""
Constructed Molecule JSONizer
=============================
"""
from stk.molecular import InchiKey, MoleculeKeyMaker
from ..molecule import MoleculeJsonizer
class ConstructedMoleculeJsonizer:
"""
Abstract base class for creating JSONs of constructed molecules.
See Also
--------
:class:`.MoleculeJsonizer`
Notes
-----
You might notice that the public methods of this abstract base
class are implemented. These are just default implementations,
which can be safely ignored or overridden, when implementing
subclasses. However, the default implementation can be used
directly, if it suits your needs.
Examples
--------
*Converting a Constructed Molecule to JSON*
You want get a JSON representation of a
:class:`.ConstructedMolecule`
.. testcode:: converting-a-molecule-to-json
import stk
# Make the molecule you want jsonize.
polymer = stk.ConstructedMolecule(
topology_graph=stk.polymer.Linear(
building_blocks=(
stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
),
repeating_unit='A',
num_repeating_units=3,
)
)
# Make a JSONizer.
jsonizer = stk.ConstructedMoleculeJsonizer()
# Get the JSON.
json = jsonizer.to_json(polymer)
*Adding Additional Molecular Keys*
Apart from atoms, bonds and the position matrix, the JSON
representation holds additional fields, one for each
:class:`.MoleculeKeyMaker` provided to the initializer
.. testcode:: adding-additional-molecular-keys
import stk
# Make the molecule you want jsonize.
polymer = stk.ConstructedMolecule(
topology_graph=stk.polymer.Linear(
building_blocks=(
stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
),
repeating_unit='A',
num_repeating_units=3,
)
)
# Make a JSONizer.
jsonizer = stk.ConstructedMoleculeJsonizer()
# Get the JSON.
json = jsonizer.to_json(polymer)
In this case, ``json`` will look something like
.. code-block:: python
{
# A tuple of JSON atom representations.
'atoms': (...),
# A tuple of JSON bond representations.
'bonds': (...),
'InChI': 'The InChI of the molecule',
'InChIKey': 'The InChIKey of the molecule',
}
For every :class:`.MoleculeKeyMaker` provided to `key_makers`,
a new key will be added to the JSON representation, with its name
given by :meth:`.MoleculeKeyMaker.get_key_name` and the value
given by :meth:`.MoleculeKeyMaker.get_key`.
"""
def __init__(
self,
key_makers=(InchiKey(), ),
):
"""
Initializes a :class:`.ConstructedMoleculeJsonizer`.
Parameters
----------
key_makers : :class:`tuple` of :class:`.MoleculeKeyMaker`
Used to make the keys of molecules, which should be
included in their JSON representations. Keys allow
molecular data to reference itself when split across
multiple JSONs.
"""
self._jsonizer = MoleculeJsonizer(key_makers=())
self._key_makers = key_makers
def to_json(self, molecule):
"""
Serialize `molecule`.
Parameters
----------
molecule : :class:`.ConstructedMolecule`
The constructed molecule to serialize.
Returns
-------
:class:`dict`
A JSON representation of `molecule`.
"""
def get_keys(building_block):
return {
key_maker.get_key_name():
key_maker.get_key(building_block)
for key_maker in self._key_makers
if isinstance(key_maker, MoleculeKeyMaker)
}
building_block_indices = {
building_block: index
for index, building_block
in enumerate(molecule.get_building_blocks())
}
building_block_indices[None] = None
def atom_info_to_json(atom_info):
if atom_info.get_building_block() is None:
return (
None,
None,
None,
)
return (
building_block_indices[atom_info.get_building_block()],
atom_info.get_building_block_id(),
atom_info.get_building_block_atom().get_id(),
)
def bond_info_to_json(bond_info):
return (
building_block_indices[bond_info.get_building_block()],
bond_info.get_building_block_id(),
)
molecule_json = self._jsonizer.to_json(molecule)
constructed_molecule_json = {
'BB': tuple(map(
get_keys,
molecule.get_building_blocks(),
)),
'aI': tuple(map(
atom_info_to_json,
molecule.get_atom_infos(),
)),
'bI': tuple(map(
bond_info_to_json,
molecule.get_bond_infos(),
)),
'nBB': tuple(map(
molecule.get_num_building_block,
molecule.get_building_blocks(),
)),
}
for key_maker in self._key_makers:
key_name = key_maker.get_key_name()
key = key_maker.get_key(molecule)
molecule_json['molecule'][key_name] = key
molecule_json['matrix'][key_name] = key
constructed_molecule_json[key_name] = key
building_block_jsons = tuple(map(
self._jsonizer.to_json,
molecule.get_building_blocks(),
))
def is_molecule_key_maker(key_maker):
return isinstance(key_maker, MoleculeKeyMaker)
for key_maker in filter(
is_molecule_key_maker,
self._key_makers,
):
key_name = key_maker.get_key_name()
for building_block, json in zip(
molecule.get_building_blocks(),
building_block_jsons,
):
key = key_maker.get_key(building_block)
json['molecule'][key_name] = key
json['matrix'][key_name] = key
return {
'molecule': molecule_json['molecule'],
'constructedMolecule': constructed_molecule_json,
'matrix': molecule_json['matrix'],
'buildingBlocks': building_block_jsons,
}
def __str__(self):
return repr(self)
def __repr__(self):
return f'{self.__class__.__name__}({self._key_makers!r})'
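The docstring's "Adding Additional Molecular Keys" example reuses the default key makers; below is a hedged sketch that actually passes a second one. stk.Smiles and its 'SMILES' key name are assumptions about the library, not shown in this file.
import stk

polymer = stk.ConstructedMolecule(
    topology_graph=stk.polymer.Linear(
        building_blocks=(
            stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
        ),
        repeating_unit='A',
        num_repeating_units=3,
    )
)
jsonizer = stk.ConstructedMoleculeJsonizer(
    key_makers=(stk.InchiKey(), stk.Smiles()),
)
json_dict = jsonizer.to_json(polymer)
# The 'molecule', 'constructedMolecule', 'matrix' and building block entries
# now carry both an 'InChIKey' field and an assumed 'SMILES' field.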
| 29.424893
| 71
| 0.563448
|
71eb8eac1a3f9989eb0bbea7144f9cb5960dd376
| 2,781
|
py
|
Python
|
tests/service/test_onedocker.py
|
zhuang-93/FBPCS
|
20dbb69fe92bd7d6c2b1b106ea2bdf2909dfcfa2
|
[
"MIT"
] | null | null | null |
tests/service/test_onedocker.py
|
zhuang-93/FBPCS
|
20dbb69fe92bd7d6c2b1b106ea2bdf2909dfcfa2
|
[
"MIT"
] | null | null | null |
tests/service/test_onedocker.py
|
zhuang-93/FBPCS
|
20dbb69fe92bd7d6c2b1b106ea2bdf2909dfcfa2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch
from libfb.py.asyncio.mock import AsyncMock
from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.service.onedocker import OneDockerService
class TestOneDockerService(unittest.TestCase):
@patch("fbpcs.service.container.ContainerService")
def setUp(self, MockContainerService):
container_svc = MockContainerService()
self.onedocker_svc = OneDockerService(container_svc)
def test_start_container(self):
mocked_container_info = ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id",
"192.0.2.0",
ContainerInstanceStatus.STARTED,
)
self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
return_value=[mocked_container_info]
)
returned_container_info = self.onedocker_svc.start_container(
"task_def", "project/exe_name", "cmd_args"
)
self.assertEqual(returned_container_info, mocked_container_info)
def test_start_containers(self):
mocked_container_info = [
ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id_1",
"192.0.2.0",
ContainerInstanceStatus.STARTED,
),
ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id_2",
"192.0.2.1",
ContainerInstanceStatus.STARTED,
),
]
self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
return_value=mocked_container_info
)
returned_container_info = self.onedocker_svc.start_containers(
"task_def", "project/exe_name", ["--k1=v1", "--k2=v2"]
)
self.assertEqual(returned_container_info, mocked_container_info)
def test_get_cmd(self):
package_name = "project/exe_name"
cmd_args = "--k1=v1 --k2=v2"
timeout = 3600
expected_cmd_without_timeout = "python3.8 -m one_docker_runner --package_name=project/exe_name --cmd='/root/one_docker/package/exe_name --k1=v1 --k2=v2'"
expected_cmd_with_timeout = expected_cmd_without_timeout + " --timeout=3600"
cmd_without_timeout = self.onedocker_svc._get_cmd(package_name, cmd_args)
cmd_with_timeout = self.onedocker_svc._get_cmd(package_name, cmd_args, timeout)
self.assertEqual(expected_cmd_without_timeout, cmd_without_timeout)
self.assertEqual(expected_cmd_with_timeout, cmd_with_timeout)
| 42.136364
| 161
| 0.68968
|
90aafeb92b40a806e730f93fa691df4cd0f15b28
| 1,715
|
py
|
Python
|
data/p3BR/R2/benchmark/startPyquil184.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startPyquil184.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startPyquil184.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=34
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(2) # number=31
prog += CZ(0,2) # number=32
prog += H(2) # number=33
prog += X(2) # number=12
prog += H(2) # number=25
prog += CZ(0,2) # number=26
prog += H(2) # number=27
prog += H(1) # number=7
prog += CZ(2,1) # number=8
prog += RX(-0.3989822670059037,1) # number=30
prog += H(1) # number=9
prog += H(1) # number=18
prog += CZ(2,1) # number=19
prog += H(1) # number=20
prog += Y(1) # number=14
prog += H(1) # number=22
prog += CZ(2,1) # number=23
prog += H(1) # number=24
prog += Z(2) # number=3
prog += X(1) # number=17
prog += Y(2) # number=5
prog += X(2) # number=21
prog += CNOT(1,0) # number=15
prog += CNOT(1,0) # number=16
prog += X(2) # number=28
prog += X(2) # number=29
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil184.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
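summrise_results builds the histogram by hand; an equivalent one-liner with collections.Counter, shown purely for comparison and not part of the benchmark:
from collections import Counter

def summarise_results(bitstrings):
    # Same output shape: {bitstring: number of occurrences}
    return dict(Counter(bitstrings))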
| 24.5
| 64
| 0.570845
|
185f26b1a2dec43334c8610807983f45792119ee
| 413
|
py
|
Python
|
courses/backend/Learn Django by Creating Projects/projects/helloworld/helloworld_project/asgi.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | null | null | null |
courses/backend/Learn Django by Creating Projects/projects/helloworld/helloworld_project/asgi.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | null | null | null |
courses/backend/Learn Django by Creating Projects/projects/helloworld/helloworld_project/asgi.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | null | null | null |
"""
ASGI config for helloworld_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helloworld_project.settings')
application = get_asgi_application()
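The module only exposes the application callable; serving it requires an ASGI server. A hedged example using uvicorn, which is not a dependency of this project:
# run_asgi.py -- hypothetical helper script
import uvicorn

if __name__ == "__main__":
    uvicorn.run("helloworld_project.asgi:application", host="127.0.0.1", port=8000)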
| 24.294118
| 78
| 0.79661
|
d288e767d5cc5ac93ce3b8a6ae18992931f800f9
| 15,175
|
py
|
Python
|
src/tools/dev/masonry/src/core.py
|
spetruzza/visit
|
3041e007790a5fd0716f5c1041004f1b1223093b
|
[
"BSD-3-Clause"
] | null | null | null |
src/tools/dev/masonry/src/core.py
|
spetruzza/visit
|
3041e007790a5fd0716f5c1041004f1b1223093b
|
[
"BSD-3-Clause"
] | null | null | null |
src/tools/dev/masonry/src/core.py
|
spetruzza/visit
|
3041e007790a5fd0716f5c1041004f1b1223093b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# file: core.py
# author: Cyrus Harrison <cyrush@llnl.gov>
#
#
import os
import subprocess
import datetime
import json
import errno
import sys
import webbrowser
from os.path import join as pjoin
__all__ = ["Context",
"shell",
"svn",
"cmake",
"make",
"inorder"]
# ----------------------------------------------------------------------------
# Method: mkdir_p
#
# Programmer: Cyrus Harrison
# Date: Mon Sept 30 2013
#
# Helper that does recursive folder creation.
#
# Recipe from:
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
# ----------------------------------------------------------------------------
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# ----------------------------------------------------------------------------
# Method: _decode_list
#
# Programmer: Cyrus Harrison
# Date: Fri Jan 11 2013
#
# Helper which decodes json unicode values (in lists) to standard strings.
#
# Recipe from:
# http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
# ----------------------------------------------------------------------------
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
# ----------------------------------------------------------------------------
# Method: _decode_dict
#
# Programmer: Cyrus Harrison
# Date: Fri Jan 11 2013
#
# Helper which decodes json unicode values (in dictionaries) to standard strings.
#
# Recipe from:
# http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
# ----------------------------------------------------------------------------
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def json_loads(jsons):
if os.path.isfile(jsons):
        jsons = open(jsons).read()
return json.loads(jsons,object_hook=_decode_dict)
def json_dumps(v):
return json.dumps(v,indent=2)
def timenow():
return datetime.datetime.now()
def timestamp(t=None,sep="_"):
""" Creates a timestamp that can easily be included in a filename. """
if t is None:
t = timenow()
sargs = (t.year,t.month,t.day,t.hour,t.minute,t.second)
sbase = "".join(["%04d",sep,"%02d",sep,"%02d",sep,"%02d",sep,"%02d",sep,"%02d"])
tstamp = sbase % sargs
return {"key": tstamp,
"year": t.year,
"month": t.month,
"day": t.day,
"hour": t.hour,
"minute": t.minute,
"second": t.second}
def timedelta(t_start,t_end):
t_delta = t_end - t_start
days, seconds = t_delta.days, t_delta.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
return {"key": "%02d_%02d_%02d" % (hours, minutes, seconds),
"hours":hours,
"minutes":minutes,
"seconds": seconds}
def sexe(cmd,ret_output=False,echo = False,env=None):
""" Helper for executing shell commands. """
kwargs = {"shell":True}
if not env is None:
kwargs["env"] = env
if echo:
print "[exe: %s]" % cmd
if ret_output:
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
p = subprocess.Popen(cmd,**kwargs)
res =p.communicate()[0]
return p.returncode,res
else:
return subprocess.call(cmd,**kwargs),""
class Context(object):
def __init__(self,enable_logging=True,log_dir=None):
self.enable_logging = enable_logging
if enable_logging and log_dir is None:
log_dir = pjoin(os.getcwd(),"_logs")
self.log_dir = log_dir
if not self.log_dir is None and not os.path.isdir(log_dir):
mkdir_p(log_dir)
self.actions = {}
self.triggers = {}
def to_dict(self):
r = {"context":{"actions":{},
"triggers":{},
"log_dir":self.log_dir,
"enable_logging":self.enable_logging}}
for k,v in self.actions.items():
r["context"]["actions"][k] = v.params
for k,v in self.triggers.items():
r["context"]["triggers"][k] = v.params
return r
def to_json(self):
return json_dumps(self.to_dict())
@classmethod
def load_dict(cls,params):
if params.has_key("context"):
res = Context(enable_logging = params["context"]["enable_logging"],
log_dir = params["context"]["log_dir"])
for k,v in params["context"]["actions"].items():
res.actions[k] = Action.load_dict(v)
for k,v in params["context"]["triggers"].items():
res.triggers[k] = Action.load_dict(v)
return res
@classmethod
def load_json(cls,jsons):
if os.path.isfile(jsons):
jsons = open(jsons).read()
params = json_loads(jsons)
return cls.load_dict(params)
def fire(self,trigger_name,key=None):
t = self.triggers[trigger_name]
if key is None:
key = self.unique_key()
key += "_" + trigger_name
res = t.fire(self,trigger_name,key)
self.log(key,res)
return res
def log(self,key,result,tag=None):
if self.log_dir is None:
return
if not tag is None:
ofname = pjoin(self.log_dir,"log_" + key + "_" + tag +".json")
else:
ofname = pjoin(self.log_dir,"log_" + key + ".json")
try:
ofile = open(ofname,"w")
ofile.write(json.dumps(result,indent=2))
ofile.close()
# create link
lastlink = pjoin(self.log_dir,"last.json")
if os.path.islink(lastlink):
os.unlink(lastlink)
os.symlink(ofname,lastlink)
except Exception as e:
print "<logging error> failed to write results to %s" % ofname
raise e
def unique_key(self):
return timestamp()["key"]
class Action(object):
def __init__(self):
self.params = {}
def to_dict(self):
return dict(self.params)
def to_json(self):
return json_dumps(self.to_dict())
@classmethod
def load_dict(cls,params):
if params.has_key("type"):
atype = params["type"]
aparams = dict(params)
del aparams["type"]
return globals()[atype](**aparams)
@classmethod
def load_json(cls,jsons):
if os.path.isfile(jsons):
jsons = open(jsons).read()
params = json_loads(jsons)
return cls.load_dict(params)
class ShellAction(Action):
def __init__(self,
cmd,
type="shell",
working_dir=None,
description=None,
halt_on_error=True,
env=None):
super(ShellAction,self).__init__()
self.params["cmd"] = cmd
self.params["type"] = type
self.params["halt_on_error"] = halt_on_error
if working_dir is None:
working_dir = os.getcwd()
self.params["working_dir"] = working_dir
if description is None:
description = ""
self.params["description"] = description
self.params["env"] = env
def execute(self,base,key,tag,parent_res):
t_start = timenow();
res = {"action":
{"key": key,
"type":self.params["type"],
"name":tag,
"cmd": self.params["cmd"],
"description": self.params["description"],
"working_dir": self.params["working_dir"],
"env": self.params["env"],
"start_time": timestamp(t_start),
"halt_on_error": self.params["halt_on_error"],
"finish_time": None,
"elapsed_time": None,
"output": None}
}
parent_res["trigger"]["active_actions"] = [res]
base.log(key=key,result=parent_res)
cwd = os.path.abspath(os.getcwd())
env = os.environ.copy()
if not self.params["env"] is None:
env.update(self.params["env"])
try:
if not os.path.isdir(self.params["working_dir"]):
mkdir_p(self.params["working_dir"])
print "[chdir to: %s]" % self.params["working_dir"]
os.chdir(self.params["working_dir"])
rcode, rout = sexe(self.params["cmd"],
ret_output=True,
echo=True,
env=env)
res["action"]["output"] = rout
res["action"]["return_code"] = rcode
except KeyboardInterrupt as e:
res["action"]["error"] = "shell command interrupted by user (ctrl-c)"
except Exception as e:
print e
res["action"]["error"] = str(e)
t_end = timenow()
res["action"]["finish_time"] = timestamp(t_end)
res["action"]["elapsed_time"] = timedelta(t_start,t_end)
os.chdir(cwd)
parent_res["trigger"]["active_actions"] = []
base.log(key=key,result=parent_res)
return res
class SVNAction(ShellAction):
def __init__(self,
svn_url,
svn_cmd,
svn_bin="svn",
working_dir=None,
description=None,
halt_on_error=True,
env=None):
cmd = " ".join([svn_bin,svn_cmd,svn_url])
super(SVNAction,self).__init__(cmd=cmd,
type="svn",
working_dir=working_dir,
description=description,
halt_on_error=halt_on_error,
env=env)
self.params["svn_url"] = svn_url
self.params["svn_cmd"] = svn_cmd
self.params["svn_bin"] = svn_bin
class CMakeAction(ShellAction):
def __init__(self,
src_dir,
cmake_opts="",
cmake_bin="cmake",
working_dir=None,
description=None,
halt_on_error=True,
env=None):
cmd = " ".join([cmake_bin,cmake_opts,src_dir])
super(CMakeAction,self).__init__(cmd=cmd,
type="cmake",
working_dir=working_dir,
description=description,
halt_on_error=halt_on_error,
env=env)
self.params["src_dir"] = src_dir
self.params["cmake_opts"] = cmake_opts
self.params["cmake_bin"] = cmake_bin
class MakeAction(ShellAction):
def __init__(self,
target="",
nthreads=1,
make_bin="make",
working_dir=None,
description=None,
halt_on_error=True,
env=None):
cmd = " ".join([make_bin,
"-j%d" % nthreads,
target])
super(MakeAction,self).__init__(cmd=cmd,
type="make",
working_dir=working_dir,
description=description,
halt_on_error=halt_on_error,
env=env)
self.params["target"] = target
self.params["nthreads"] = nthreads
self.params["make_bin"] =make_bin
class InorderTrigger(Action):
def __init__(self,actions=None):
super(InorderTrigger,self).__init__()
self.params["actions"] = []
if not actions is None:
self.params["actions"] = actions
self.params["type"] = "inorder"
def append(self,action_name):
self.params["actions"].append(action_name)
def extend(self,action_list):
self.params["actions"].extend(action_list)
def fire(self,base,trigger_name,key):
t_start = timenow();
res = {"trigger":{"name":trigger_name,
"key": key,
"start_time": timestamp(t_start),
"end_time": None,
"elapsed_time": None,
"actions": self.params["actions"],
"active_actions":[],
"type":"inorder",
"results" :[]}}
base.log(key,res)
try:
for action in self.params["actions"]:
print "[fire: %s]" % action
a = base.actions[action]
r = a.execute(base,key,action,res)
res["trigger"]["results"].append(r)
base.log(key,res)
if r["action"].has_key("return_code"):
print "[rcode: %d]" % r["action"]["return_code"]
if "error" in r["action"].keys() or \
(r["action"].has_key("return_code") and r["action"]["return_code"] != 0) :
emsg = "[action failed: %s]" % json.dumps(r,indent=2)
print emsg
if r["action"].has_key("halt_on_error") and r["action"]["halt_on_error"]:
raise Exception(emsg)
except Exception as emsg:
res["trigger"]["error"] = str(emsg)
t_end = timenow()
res["end_time"] = timestamp(t_end)
res["elapsed_time"] = timedelta(t_start,t_end)
base.log(key,res)
return res
shell = ShellAction
svn = SVNAction
cmake = CMakeAction
make = MakeAction
inorder = InorderTrigger
def view_log(fname):
port = 8000
    html_src = pjoin(os.path.split(os.path.abspath(__file__))[0], "html")
    log_dir = os.path.split(os.path.abspath(fname))[0]
    sexe("cp -f %s/* %s" % (html_src, log_dir))
os.chdir(log_dir)
subprocess.Popen([sys.executable, '-m', 'SimpleHTTPServer', str(port)])
webbrowser.open_new_tab('localhost:8000/log_view?log=%s' % fname)
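A hedged end-to-end sketch of the exported helpers: register actions on a Context, chain them with an inorder trigger, and fire the trigger. The import path, directories and CMake options are illustrative only.
from core import Context, cmake, make, inorder  # module path assumed

ctx = Context(log_dir="_logs")
ctx.actions["configure"] = cmake(src_dir="/path/to/src",
                                 cmake_opts="-DCMAKE_BUILD_TYPE=Release",
                                 working_dir="/path/to/build")
ctx.actions["build"] = make(nthreads=4, working_dir="/path/to/build")
ctx.triggers["build_all"] = inorder(["configure", "build"])
result = ctx.fire("build_all")  # runs each action in order, logging JSON to _logs/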
| 35.455607
| 111
| 0.505766
|
90d4417c0ec7526b6462c4fe1fc28bef54ee46f5
| 2,906
|
py
|
Python
|
src/sentry/web/frontend/help_platform_base.py
|
arsh-co/sentry
|
7a83a7e8a13047a9471756d679e2deb596cc2ca1
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/web/frontend/help_platform_base.py
|
arsh-co/sentry
|
7a83a7e8a13047a9471756d679e2deb596cc2ca1
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/web/frontend/help_platform_base.py
|
arsh-co/sentry
|
7a83a7e8a13047a9471756d679e2deb596cc2ca1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.db.models import Q
from itertools import groupby
from sentry.models import Project, ProjectKey
from sentry.web.frontend.base import BaseView
class HelpPlatformBaseView(BaseView):
auth_required = False
def get_project_list(self, user):
return list(Project.objects.filter(
Q(organization__member_set__has_global_access=True, organization__member_set__user=user)
| Q(team__organizationmember__user=user)
).select_related('team', 'organization').order_by('organization', 'team'))
def group_project_list(self, project_list):
results = []
for org, org_project_list in groupby(project_list, key=lambda x: x.organization):
org_results = []
for team, team_project_list in groupby(list(org_project_list), key=lambda x: x.team):
org_results.append((team, list(team_project_list)))
results.append((org, org_results))
return results
def get_key(self, project, user):
try:
key = ProjectKey.objects.filter(user=None, project=project)[0]
except IndexError:
try:
key = ProjectKey.objects.filter(user=user, project=project)[0]
except IndexError:
key = None
return key
def convert_args(self, request, *args, **kwargs):
try:
pid = int(request.GET.get('pid', request.session.get('pid', 0)))
except (TypeError, ValueError):
pid = None
if request.user.is_authenticated():
project_list = self.get_project_list(request.user)
else:
project_list = []
if pid:
for project in project_list:
if pid == project.id:
selected_project = project
break
else:
selected_project = None
else:
selected_project = None
if selected_project:
request.session['pid'] = selected_project.id
kwargs['project_list'] = project_list
kwargs['selected_project'] = selected_project
return (args, kwargs)
def get_context_data(self, request, project_list, selected_project, **kwargs):
context = super(HelpPlatformBaseView, self).get_context_data(request, **kwargs)
if selected_project:
key = self.get_key(selected_project, request.user)
else:
key = None
if key:
dsn_private = key.dsn_private
dsn_public = key.dsn_public
else:
dsn_private = None
dsn_public = None
context = {
'selected_project': selected_project,
'org_results': self.group_project_list(project_list),
'dsn': dsn_private,
'dsn_public': dsn_public,
}
return context
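group_project_list relies on itertools.groupby, which only merges adjacent keys; that is why get_project_list orders by organization and team first. A standalone sketch of the same grouping with plain tuples (the data here is illustrative):
from itertools import groupby

projects = [("org_a", "team_1", "p1"), ("org_a", "team_1", "p2"),
            ("org_a", "team_2", "p3"), ("org_b", "team_3", "p4")]
results = []
for org, org_projects in groupby(projects, key=lambda p: p[0]):
    org_results = [(team, list(team_projects))
                   for team, team_projects in groupby(list(org_projects), key=lambda p: p[1])]
    results.append((org, org_results))
# results == [('org_a', [('team_1', [...]), ('team_2', [...])]),
#             ('org_b', [('team_3', [...])])]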
| 31.934066
| 100
| 0.607364
|
2b57721295576b58ec55546ba65a10b5fb906569
| 8,094
|
py
|
Python
|
src/wordleclone/app.py
|
jmesparteroup/CS150_ExtraLab_1
|
9a2e26e098d40cf0571e14459c96c42d97b21aa8
|
[
"BSD-3-Clause"
] | null | null | null |
src/wordleclone/app.py
|
jmesparteroup/CS150_ExtraLab_1
|
9a2e26e098d40cf0571e14459c96c42d97b21aa8
|
[
"BSD-3-Clause"
] | null | null | null |
src/wordleclone/app.py
|
jmesparteroup/CS150_ExtraLab_1
|
9a2e26e098d40cf0571e14459c96c42d97b21aa8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Wordle Clone
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from random import randint
class Vault():
def __init__(self,path) -> None:
self.resources_folder = f"{path}/resources/"
self.guess_list, self.allowed_guesses = self.getGuessData()
self.setSecret()
def getGuessData(self):
guesses = open(f"{self.resources_folder}guesses.txt", 'r').readlines()
allowed_guesses = open(f"{self.resources_folder}allowedguesses.txt", 'r').readlines()
for i in range(len(guesses)):
guesses[i] = guesses[i].replace('\n', '')
for i in range(len(allowed_guesses)):
allowed_guesses[i] = allowed_guesses[i].replace('\n', '')
return guesses, allowed_guesses
def validate_guess(self, guess):
invalid_title = "Invalid Guess"
if len(guess) < 5:
return [False, {'invalid_title': invalid_title, 'invalid_body':'Guess length should be equal to 5!\n'}]
if not guess.isalpha() :
return [False, {'invalid_title': invalid_title, 'invalid_body':'Guess should only be comprised of letters in the English alphabet!\n'}]
if guess not in self.allowed_guesses:
return [False, {'invalid_title': invalid_title, 'invalid_body':'Guess not allowed!\n'}]
return [True]
def setSecret(self):
self.secretWord = self.guess_list[randint(0, len(self.guess_list)-1)]
def getSecret(self):
return self.secretWord
class Game():
def __init__(self, gridsize, wordsize, interactables, vault) -> None:
self.current_guess_count = 0
self.isGameOver = False
self.isWin = False
self.gridsize = gridsize
self.wordsize = wordsize
self.vault = vault
self.row_boxes = interactables['row_boxes']
self.get_guess_input = interactables['guess_input']
self.show_dialog = interactables['show_dialog']
self.used_letter = interactables['used_letter']
def do_guess(self):
if self.isGameOver: return
localSecret = self.vault.getSecret()
guess = self.get_guess_input()
score = 0
validatorCheck = self.vault.validate_guess(guess)
if not validatorCheck[0]:
error_message = validatorCheck[1]
self.show_dialog(error_message['invalid_title'], error_message['invalid_body'], 0)
return
for i in range(self.wordsize):
current_box = self.row_boxes[self.current_guess_count].children[i]
current_box.label = guess[i].upper()
current_box.style.background_color = '#A9A9A9'
self.used_letter(guess[i], '#696969')
if guess[i] == localSecret[i]:
localSecret = localSecret[:i] + '!' + localSecret[i+1:]
guess = guess[:i] + '?' + guess[i+1:]
self.used_letter(guess[i], '#90EE90')
current_box.style.background_color = '#90EE90'
score += 1
for i in range(self.wordsize):
current_box = self.row_boxes[self.current_guess_count].children[i]
if guess[i] in localSecret:
localSecret = localSecret.replace(guess[i], '!')
current_box.style.background_color = '#FFFF33'
self.used_letter(guess[i], '#FFFF33')
if score == self.wordsize:
self.isGameOver = True
self.isWin = True
self.show_dialog("Game Over!", "You won! Pasikat ka na sa Twitter!\nClick reset to play again", 1)
self.current_guess_count += 1
if self.current_guess_count > 5 and not self.isWin:
self.isGameOver = True
self.show_dialog("Game Over!", f"You used up all your guesses.\nThe secret word was {self.vault.getSecret().upper()} \nClick reset to play again :(", 1)
return
def resetGame(self):
self.current_guess_count = 0
self.isWin = False
self.isGameOver = False
self.vault.setSecret()
return
class WordleClone(toga.App):
def startup(self):
#SET VALUES
self.gridsize = 6 # nxn grid
self.wordsize = 5
self.alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.vault = Vault(str(self.paths.app))
# Generate UI
self.generate_ui()
# Start Game
self.start_game()
def start_game(self):
interactables = {
'guess_input': self.get_guess_input,
'row_boxes': self.row_boxes,
'show_dialog': self.show_dialog,
'used_letter': self.used_letter
}
self.Game = Game(self.gridsize, self.wordsize, interactables, self.vault)
def generate_ui(self):
main_box = toga.Box(style=Pack(direction=COLUMN, alignment="center"))
guess_label = toga.Label('Your guess: ', style=Pack(padding=5))
self.guess_input = toga.TextInput(style=Pack(flex=1))
guess_box = toga.Box(style=Pack(direction=ROW, padding=(5,0)))
guess_box.add(guess_label)
guess_box.add(self.guess_input)
button = toga.Button('Guess!', on_press=self.do_guess_handler, style=Pack(padding=(5,0)))
self.alphaBox = toga.Box(style=Pack(alignment='center', padding=(5,0)))
for letter in self.alphabet:
alphabet_label = toga.Label(letter, style=Pack(padding=(5,0), alignment='center', font_size='12', background_color='#696969', color='#FAF9F6'))
self.alphaBox.add(alphabet_label)
resetButton = toga.Button('Restart', on_press=self.reset_game_handler, style=Pack(padding=(5,0)))
main_box.add(guess_box)
main_box.add(button)
main_box.add(self.alphaBox)
main_box.add(self.guess_area_gen())
main_box.add(resetButton)
self.main_window = toga.MainWindow(title=self.formal_name, size=(600, 500))
self.main_window.content = main_box
self.main_window.show()
def guess_area_gen(self):
guess_area = toga.Box(style=Pack(direction=COLUMN, padding=0, flex=1, alignment="center"))
self.row_boxes = [toga.Box(style=Pack(direction=ROW, padding=2, flex=1, alignment="center")) for i in range(self.gridsize)] #create 6 rows
for i in range(self.gridsize):
for j in range(self.wordsize):
newButton = toga.Button(label="", id=f"{j}", style=Pack(background_color="#FAF9F6", font_family='monospace', font_weight='bold', padding_left=5, padding_right=5, width=50, height=50), enabled=False)
self.row_boxes[i].add(newButton)
guess_area.add(self.row_boxes[i])
return guess_area
def reset_ui(self):
#clear contents
for i in range(self.gridsize):
for j in range(self.wordsize):
self.row_boxes[i].children[j].style.background_color = "#FAF9F6"
self.row_boxes[i].children[j].label = ""
#reset hints
for i in range(len(self.alphabet)):
self.alphaBox.children[i].style.color = '#FAF9F6'
return
def show_dialog(self, title, message, messageType):
if messageType == 0:
self.main_window.error_dialog(title=title, message=message)
if messageType == 1:
self.main_window.info_dialog(title=title, message=message)
return
def used_letter(self, letter, color):
for i in range(len(self.alphabet)):
if self.alphabet[i].lower() == letter:
self.alphaBox.children[i].style.color = color
return
def get_guess_input(self):
guess = self.guess_input.value.lower()
self.guess_input.value = ""
return guess
def do_guess_handler(self, widget):
return self.Game.do_guess()
def reset_game_handler(self, widget):
self.reset_ui()
return self.Game.resetGame()
def main():
return WordleClone()
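Game.do_guess scores in two passes, first exact positions and then letters present elsewhere, consuming matched letters with sentinel characters so nothing is counted twice. A standalone sketch of that scoring logic, with letter codes standing in for the Toga colour styling:
def score_guess(secret, guess):
    # 'G' = right letter, right place; 'Y' = in the word, wrong place; '_' = miss
    colours = ['_'] * len(guess)
    secret = list(secret)
    for i, letter in enumerate(guess):
        if secret[i] == letter:
            colours[i] = 'G'
            secret[i] = '!'                 # consume the matched letter
    for i, letter in enumerate(guess):
        if colours[i] == '_' and letter in secret:
            colours[i] = 'Y'
            secret[secret.index(letter)] = '!'
    return ''.join(colours)

print(score_guess("crane", "caret"))        # GYYY_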
| 35.191304
| 214
| 0.608723
|
bf5563fb92aa6548cbffc74edd86ffc3c536e1f9
| 4,895
|
py
|
Python
|
extras/ams_wrapper/tests/unit/test_detection_model.py
|
gnomonsis/model_server
|
bdd7eccde5dfa4f518f6964c103b4389cd00caaf
|
[
"Apache-2.0"
] | null | null | null |
extras/ams_wrapper/tests/unit/test_detection_model.py
|
gnomonsis/model_server
|
bdd7eccde5dfa4f518f6964c103b4389cd00caaf
|
[
"Apache-2.0"
] | null | null | null |
extras/ams_wrapper/tests/unit/test_detection_model.py
|
gnomonsis/model_server
|
bdd7eccde5dfa4f518f6964c103b4389cd00caaf
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from typing import Dict
import pytest
import numpy as np
from src.api.models.detection_model import DetectionModel
from src.api.models.model_config import ModelOutputConfiguration
MOCK_INFERENCE_OUTPUT = {
'result':
np.array([
[
[
# id, label, confidence, x_min, y_min, x_max, y_max
[0, 1.0, 0.97, 0.17, 0.15, 0.5, 0.7],
[1, 1.0, 0.46, 0.12, 0.11, 0.6, 0.5],
]
]
])
}
@pytest.fixture
def fake_output_config() -> Dict[str, ModelOutputConfiguration]:
return {
'result': ModelOutputConfiguration(output_name='result',
value_index_mapping={
"image_id": 0,
"value": 1,
"confidence": 2,
"x_min": 3,
"y_min": 4,
"x_max": 5,
"y_max": 6
},
classes={
"background": 0.0,
"vehicle": 1.0
}
)
}
@pytest.mark.parametrize("inference_output,expected_response", [
(MOCK_INFERENCE_OUTPUT,
{"type": "entity", "subtype": None,
"entities": [{"tag": {"value": "vehicle", "confidence": 0.97},
"box": {"l": 0.17, "t": 0.15, "w": abs(0.5-0.17), "h": abs(0.7-0.15)}},
{"tag": {"value": "vehicle", "confidence": 0.46},
"box": {"l": 0.12, "t": 0.11, "w": abs(0.6-0.12), "h": abs(0.5-0.11)}}]}
)])
def test_postprocess_inference_output(inference_output, expected_response, fake_output_config):
model = DetectionModel(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
@pytest.mark.parametrize("inference_output,expected_response,top_k", [
(MOCK_INFERENCE_OUTPUT,
{"type": "entity", "subtype": None,
"entities": [{"tag": {"value": "vehicle", "confidence": 0.97},
"box": {"l": 0.17, "t": 0.15, "w": abs(0.5-0.17), "h": abs(0.7-0.15)}}]
},
1
)])
def test_postprocess_inference_output_top_k(inference_output, expected_response, top_k, fake_output_config):
fake_output_config['result'].top_k_results = top_k
model = DetectionModel(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
@pytest.mark.parametrize("inference_output,expected_response,confidence_threshold", [
(MOCK_INFERENCE_OUTPUT,
{"type": "entity", "subtype": None,
"entities": [{"tag": {"value": "vehicle", "confidence": 0.97},
"box": {"l": 0.17, "t": 0.15, "w": abs(0.5-0.17), "h": abs(0.7-0.15)}}]
},
0.5
)])
def test_postprocess_inference_output_confidence_threshold(inference_output, expected_response,
confidence_threshold, fake_output_config):
fake_output_config['result'].confidence_threshold = confidence_threshold
model = DetectionModel(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
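The fixture's value_index_mapping documents how each raw detection row is decoded; below is a hedged standalone sketch of that decoding, using the same width/height arithmetic as the expected responses above (the helper name is illustrative, not part of DetectionModel):
def row_to_entity(row, classes_by_value, index_map):
    # Row layout per value_index_mapping:
    # [image_id, value, confidence, x_min, y_min, x_max, y_max]
    value = row[index_map["value"]]
    label = next(name for name, v in classes_by_value.items() if v == value)
    x_min, y_min = row[index_map["x_min"]], row[index_map["y_min"]]
    x_max, y_max = row[index_map["x_max"]], row[index_map["y_max"]]
    return {
        "tag": {"value": label, "confidence": row[index_map["confidence"]]},
        "box": {"l": x_min, "t": y_min,
                "w": abs(x_max - x_min), "h": abs(y_max - y_min)},
    }

row = [0, 1.0, 0.97, 0.17, 0.15, 0.5, 0.7]
entity = row_to_entity(row, {"background": 0.0, "vehicle": 1.0},
                       {"image_id": 0, "value": 1, "confidence": 2,
                        "x_min": 3, "y_min": 4, "x_max": 5, "y_max": 6})
# entity["tag"]["value"] == "vehicle"; entity["box"]["w"] == abs(0.5 - 0.17)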
| 44.5
| 112
| 0.501328
|
ffd4ba596e861816800cb7ed67990ba259f5936b
| 127,280
|
py
|
Python
|
designer_family/tests/functional/db/test_allocation_candidates.py
|
guishaowu/designer_family
|
c89e16c6649c181f3262aa65fa97a457abdc2eb2
|
[
"Apache-2.0"
] | null | null | null |
designer_family/tests/functional/db/test_allocation_candidates.py
|
guishaowu/designer_family
|
c89e16c6649c181f3262aa65fa97a457abdc2eb2
|
[
"Apache-2.0"
] | null | null | null |
designer_family/tests/functional/db/test_allocation_candidates.py
|
guishaowu/designer_family
|
c89e16c6649c181f3262aa65fa97a457abdc2eb2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os_resource_classes as orc
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
import six
import sqlalchemy as sa
from designer_family import exception
from designer_family import lib as placement_lib
from designer_family.objects import allocation_candidate as ac_obj
from designer_family.objects import research_context as res_ctx
from designer_family.objects import resource_class as rc_obj
from designer_family.objects import resource_provider as rp_obj
from designer_family.objects import trait as trait_obj
from designer_family.tests.functional.db import test_base as tb
def _req_group_search_context(context, **kwargs):
resources = {
orc.VCPU: 2,
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
}
request = placement_lib.RequestGroup(
use_same_provider=False,
resources=kwargs.get('resources', resources),
required_traits=kwargs.get('required_traits', {}),
forbidden_traits=kwargs.get('forbidden_traits', {}),
member_of=kwargs.get('member_of', []),
forbidden_aggs=kwargs.get('forbidden_aggs', []),
in_tree=kwargs.get('in_tree', None),
)
has_trees = res_ctx._has_provider_trees(context)
sharing = res_ctx.get_sharing_providers(context)
rg_ctx = res_ctx.RequestGroupSearchContext(
context, request, has_trees, sharing)
return rg_ctx
class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
def test_get_provider_ids_matching(self):
# These RPs are named based on whether we expect them to be 'incl'uded
# or 'excl'uded in the result.
# No inventory records. This one should never show up in a result.
self._create_provider('no_inventory')
# Inventory of adequate CPU and memory, no allocations against it.
excl_big_cm_noalloc = self._create_provider('big_cm_noalloc')
tb.add_inventory(excl_big_cm_noalloc, orc.VCPU, 15)
tb.add_inventory(excl_big_cm_noalloc, orc.MEMORY_MB,
4096, max_unit=2048)
# Inventory of adequate memory and disk, no allocations against it.
excl_big_md_noalloc = self._create_provider('big_md_noalloc')
tb.add_inventory(excl_big_md_noalloc, orc.MEMORY_MB,
4096, max_unit=2048)
tb.add_inventory(excl_big_md_noalloc, orc.DISK_GB, 2000)
# Adequate inventory, no allocations against it.
incl_biginv_noalloc = self._create_provider('biginv_noalloc')
tb.add_inventory(incl_biginv_noalloc, orc.VCPU, 15)
tb.add_inventory(incl_biginv_noalloc, orc.MEMORY_MB,
4096, max_unit=2048)
tb.add_inventory(incl_biginv_noalloc, orc.DISK_GB, 2000)
# No allocations, but inventory unusable. Try to hit all the possible
# reasons for exclusion.
# VCPU min_unit too high
excl_badinv_min_unit = self._create_provider('badinv_min_unit')
tb.add_inventory(excl_badinv_min_unit, orc.VCPU, 12, min_unit=6)
tb.add_inventory(excl_badinv_min_unit, orc.MEMORY_MB,
4096, max_unit=2048)
tb.add_inventory(excl_badinv_min_unit, orc.DISK_GB, 2000)
# MEMORY_MB max_unit too low
excl_badinv_max_unit = self._create_provider('badinv_max_unit')
tb.add_inventory(excl_badinv_max_unit, orc.VCPU, 15)
tb.add_inventory(excl_badinv_max_unit, orc.MEMORY_MB,
4096, max_unit=512)
tb.add_inventory(excl_badinv_max_unit, orc.DISK_GB, 2000)
# DISK_GB unsuitable step_size
excl_badinv_step_size = self._create_provider('badinv_step_size')
tb.add_inventory(excl_badinv_step_size, orc.VCPU, 15)
tb.add_inventory(excl_badinv_step_size, orc.MEMORY_MB,
4096, max_unit=2048)
tb.add_inventory(excl_badinv_step_size, orc.DISK_GB, 2000, step_size=7)
# Not enough total VCPU
excl_badinv_total = self._create_provider('badinv_total')
tb.add_inventory(excl_badinv_total, orc.VCPU, 4)
tb.add_inventory(excl_badinv_total, orc.MEMORY_MB,
4096, max_unit=2048)
tb.add_inventory(excl_badinv_total, orc.DISK_GB, 2000)
# Too much reserved MEMORY_MB
excl_badinv_reserved = self._create_provider('badinv_reserved')
tb.add_inventory(excl_badinv_reserved, orc.VCPU, 15)
tb.add_inventory(excl_badinv_reserved, orc.MEMORY_MB,
4096, max_unit=2048, reserved=3500)
tb.add_inventory(excl_badinv_reserved, orc.DISK_GB, 2000)
# DISK_GB allocation ratio blows it up
excl_badinv_alloc_ratio = self._create_provider('badinv_alloc_ratio')
tb.add_inventory(excl_badinv_alloc_ratio, orc.VCPU, 15)
tb.add_inventory(excl_badinv_alloc_ratio,
orc.MEMORY_MB, 4096, max_unit=2048)
tb.add_inventory(excl_badinv_alloc_ratio, orc.DISK_GB,
2000, allocation_ratio=0.5)
# Inventory consumed in one RC, but available in the others
excl_1invunavail = self._create_provider('1invunavail')
tb.add_inventory(excl_1invunavail, orc.VCPU, 10)
self.allocate_from_provider(excl_1invunavail, orc.VCPU, 7)
tb.add_inventory(excl_1invunavail, orc.MEMORY_MB, 4096)
self.allocate_from_provider(excl_1invunavail, orc.MEMORY_MB, 1024)
tb.add_inventory(excl_1invunavail, orc.DISK_GB, 2000)
self.allocate_from_provider(excl_1invunavail, orc.DISK_GB, 400)
# Inventory all consumed
excl_allused = self._create_provider('allused')
tb.add_inventory(excl_allused, orc.VCPU, 10)
self.allocate_from_provider(excl_allused, orc.VCPU, 7)
tb.add_inventory(excl_allused, orc.MEMORY_MB, 4000)
self.allocate_from_provider(excl_allused, orc.MEMORY_MB, 1500)
self.allocate_from_provider(excl_allused, orc.MEMORY_MB, 2000)
tb.add_inventory(excl_allused, orc.DISK_GB, 1500)
self.allocate_from_provider(excl_allused, orc.DISK_GB, 1)
# Inventory available in requested classes, but unavailable in others
incl_extra_full = self._create_provider('extra_full')
tb.add_inventory(incl_extra_full, orc.VCPU, 20)
self.allocate_from_provider(incl_extra_full, orc.VCPU, 15)
tb.add_inventory(incl_extra_full, orc.MEMORY_MB, 4096)
self.allocate_from_provider(incl_extra_full, orc.MEMORY_MB, 1024)
tb.add_inventory(incl_extra_full, orc.DISK_GB, 2000)
self.allocate_from_provider(incl_extra_full, orc.DISK_GB, 400)
tb.add_inventory(incl_extra_full, orc.PCI_DEVICE, 4)
self.allocate_from_provider(incl_extra_full, orc.PCI_DEVICE, 1)
self.allocate_from_provider(incl_extra_full, orc.PCI_DEVICE, 3)
        # Inventory available in unrequested classes, not in the requested ones
excl_extra_avail = self._create_provider('extra_avail')
# Incompatible step size
tb.add_inventory(excl_extra_avail, orc.VCPU, 10, step_size=3)
# Not enough left after reserved + used
tb.add_inventory(excl_extra_avail, orc.MEMORY_MB, 4096,
max_unit=2048, reserved=2048)
self.allocate_from_provider(excl_extra_avail, orc.MEMORY_MB, 1040)
# Allocation ratio math
tb.add_inventory(excl_extra_avail, orc.DISK_GB, 2000,
allocation_ratio=0.5)
tb.add_inventory(excl_extra_avail, orc.IPV4_ADDRESS, 48)
custom_special = rc_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL')
custom_special.create()
tb.add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100)
self.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)
resources = {
orc.VCPU: 5,
orc.MEMORY_MB: 1024,
orc.DISK_GB: 1500
}
# Run it!
rg_ctx = _req_group_search_context(self.ctx, resources=resources)
res = res_ctx.get_provider_ids_matching(rg_ctx)
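        # NOTE: the result is a set of two-tuples whose second element is the
        # root provider's internal ID. Every provider in this test is a root,
        # which is why the expected set below pairs each rp.id with itself.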
# We should get all the incl_* RPs
expected = [incl_biginv_noalloc, incl_extra_full]
self.assertEqual(set((rp.id, rp.id) for rp in expected), set(res))
        # Now require that the providers have a set of required traits and
        # verify that this returns no results, since we haven't yet associated
        # any traits with the providers
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
        # get_provider_ids_matching()'s required_traits and forbidden_traits
        # arguments are maps of trait name to the trait's internal ID
req_traits = {os_traits.HW_CPU_X86_AVX2: avx2_t.id}
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
required_traits=req_traits,
)
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual([], res)
# Next let's set the required trait to an excl_* RPs.
# This should result in no results returned as well.
excl_big_md_noalloc.set_traits([avx2_t])
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual([], res)
# OK, now add the trait to one of the incl_* providers and verify that
# provider now shows up in our results
incl_biginv_noalloc.set_traits([avx2_t])
res = res_ctx.get_provider_ids_matching(rg_ctx)
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# Let's see if the in_tree filter works
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
in_tree=uuids.biginv_noalloc,
)
res = res_ctx.get_provider_ids_matching(rg_ctx)
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# We don't get anything if the specified tree doesn't satisfy the
# requirements in the first place
self.assertRaises(exception.ResourceProviderNotFound,
_req_group_search_context,
self.ctx, resources=resources,
in_tree=uuids.allused)
def test_get_provider_ids_matching_with_multiple_forbidden(self):
rp1 = self._create_provider('rp1', uuids.agg1)
tb.add_inventory(rp1, orc.VCPU, 64)
rp2 = self._create_provider('rp2', uuids.agg1)
trait_two, = tb.set_traits(rp2, 'CUSTOM_TWO')
tb.add_inventory(rp2, orc.VCPU, 64)
rp3 = self._create_provider('rp3')
trait_three, = tb.set_traits(rp3, 'CUSTOM_THREE')
tb.add_inventory(rp3, orc.VCPU, 64)
resources = {orc.VCPU: 4}
forbidden_traits = {trait_two.name: trait_two.id,
trait_three.name: trait_three.id}
member_of = [[uuids.agg1]]
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
forbidden_traits=forbidden_traits,
member_of=member_of)
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual({(rp1.id, rp1.id)}, set(res))
def test_get_provider_ids_matching_with_aggregates(self):
rp1 = self._create_provider('rp1', uuids.agg1, uuids.agg2)
rp2 = self._create_provider('rp2', uuids.agg2, uuids.agg3)
rp3 = self._create_provider('rp3', uuids.agg3, uuids.agg4)
rp4 = self._create_provider('rp4', uuids.agg4, uuids.agg1)
rp5 = self._create_provider('rp5')
tb.add_inventory(rp1, orc.VCPU, 64)
tb.add_inventory(rp2, orc.VCPU, 64)
tb.add_inventory(rp3, orc.VCPU, 64)
tb.add_inventory(rp4, orc.VCPU, 64)
tb.add_inventory(rp5, orc.VCPU, 64)
resources = {orc.VCPU: 4}
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
member_of=[[uuids.agg1]],
)
expected_rp = [rp1, rp4]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
member_of=[[uuids.agg1, uuids.agg2]],
)
expected_rp = [rp1, rp2, rp4]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
member_of=[[uuids.agg1, uuids.agg2], [uuids.agg4]],
)
expected_rp = [rp4]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
forbidden_aggs=[uuids.agg1],
)
expected_rp = [rp2, rp3, rp5]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
forbidden_aggs=[uuids.agg1, uuids.agg2],
)
expected_rp = [rp3, rp5]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
member_of=[[uuids.agg1, uuids.agg2]],
forbidden_aggs=[uuids.agg3, uuids.agg4],
)
expected_rp = [rp1]
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
member_of=[[uuids.agg1]],
forbidden_aggs=[uuids.agg1],
)
expected_rp = []
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
def test_get_provider_ids_having_all_traits(self):
def run(traitnames, expected_ids):
tmap = {}
if traitnames:
tmap = trait_obj.ids_from_names(self.ctx, traitnames)
obs = res_ctx._get_provider_ids_having_all_traits(self.ctx, tmap)
self.assertEqual(sorted(expected_ids), sorted(obs))
# No traits. This will never be returned, because it's illegal to
# invoke the method with no traits.
self._create_provider('cn1')
# One trait
cn2 = self._create_provider('cn2')
tb.set_traits(cn2, 'HW_CPU_X86_TBM')
        # One trait the same as cn2, plus two more
cn3 = self._create_provider('cn3')
tb.set_traits(cn3, 'HW_CPU_X86_TBM', 'HW_CPU_X86_TSX',
'HW_CPU_X86_SGX')
# Disjoint
cn4 = self._create_provider('cn4')
tb.set_traits(cn4, 'HW_CPU_X86_SSE2', 'HW_CPU_X86_SSE3', 'CUSTOM_FOO')
# Request with no traits not allowed
self.assertRaises(
ValueError,
res_ctx._get_provider_ids_having_all_traits, self.ctx, None)
self.assertRaises(
ValueError,
res_ctx._get_provider_ids_having_all_traits, self.ctx, {})
# Common trait returns both RPs having it
run(['HW_CPU_X86_TBM'], [cn2.id, cn3.id])
# Just the one
run(['HW_CPU_X86_TSX'], [cn3.id])
run(['HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
run(['CUSTOM_FOO'], [cn4.id])
# Including the common one still just gets me cn3
run(['HW_CPU_X86_TBM', 'HW_CPU_X86_SGX'], [cn3.id])
run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
# Can't be satisfied
run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'CUSTOM_FOO'], [])
run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX',
'CUSTOM_FOO'], [])
run(['HW_CPU_X86_SGX', 'HW_CPU_X86_SSE3'], [])
run(['HW_CPU_X86_TBM', 'CUSTOM_FOO'], [])
run(['HW_CPU_X86_BMI'], [])
trait_obj.Trait(self.ctx, name='CUSTOM_BAR').create()
run(['CUSTOM_BAR'], [])
class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
def _get_rp_ids_matching_names(self, names):
"""Utility function to look up resource provider IDs from a set of
supplied provider names directly from the API DB.
"""
names = map(six.text_type, names)
sel = sa.select([rp_obj._RP_TBL.c.id])
sel = sel.where(rp_obj._RP_TBL.c.name.in_(names))
with self.placement_db.get_engine().connect() as conn:
rp_ids = set([r[0] for r in conn.execute(sel)])
return rp_ids
def test_get_trees_matching_all(self):
"""Creates a few provider trees having different inventories and
allocations and tests the get_trees_matching_all_resources() utility
function to ensure that matching trees and resource providers are
returned.
"""
def _run_test(expected_trees, expected_rps, **kwargs):
"""Helper function to validate the test result"""
# NOTE(jaypipes): get_trees_matching_all() expects a dict of
# resource class internal identifiers, not string names
if not expected_trees:
try:
self.assertRaises(exception.ResourceProviderNotFound,
_req_group_search_context,
self.ctx, **kwargs)
return
except Exception:
pass
rg_ctx = _req_group_search_context(self.ctx, **kwargs)
rw_ctx = res_ctx.RequestWideSearchContext(
self.ctx, placement_lib.RequestWideParams(), True)
results = res_ctx.get_trees_matching_all(rg_ctx, rw_ctx)
tree_ids = self._get_rp_ids_matching_names(expected_trees)
rp_ids = self._get_rp_ids_matching_names(expected_rps)
self.assertEqual(tree_ids, results.trees)
self.assertEqual(rp_ids, results.rps)
# Before we even set up any providers, verify that the short-circuits
# work to return empty lists
_run_test([], [])
# We are setting up 3 trees of providers that look like this:
#
# compute node (cn)
# / \
# / \
# numa cell 0 numa cell 1
# | |
# | |
# pf 0 pf 1
#
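        # The provider UUIDs follow the uuids sentinel convention keyed by
        # provider name (e.g. uuids.cn1 is the UUID of the provider named
        # 'cn1' created below), which is what later filters such as
        # in_tree=uuids.cn1 rely on.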
for x in ('1', '2', '3'):
name = 'cn' + x
cn = self._create_provider(name)
tb.add_inventory(cn, orc.VCPU, 16)
tb.add_inventory(cn, orc.MEMORY_MB, 32768)
name = 'cn' + x + '_numa0'
numa_cell0 = self._create_provider(name, parent=cn.uuid)
name = 'cn' + x + '_numa1'
numa_cell1 = self._create_provider(name, parent=cn.uuid)
name = 'cn' + x + '_numa0_pf0'
pf0 = self._create_provider(name, parent=numa_cell0.uuid)
tb.add_inventory(pf0, orc.SRIOV_NET_VF, 8)
name = 'cn' + x + '_numa1_pf1'
pf1 = self._create_provider(name, parent=numa_cell1.uuid)
tb.add_inventory(pf1, orc.SRIOV_NET_VF, 8)
if x == '1':
# Associate the first compute node with agg1 and agg2
cn.set_aggregates([uuids.agg1, uuids.agg2])
if x == '2':
# Associate the second PF on the second compute node with agg2
pf1.set_aggregates([uuids.agg2])
if x == '3':
                # Associate the third compute node with agg2 and agg3
cn.set_aggregates([uuids.agg2, uuids.agg3])
                # Associate the second PF on the third compute node with agg4
pf1.set_aggregates([uuids.agg4])
# Mark the second PF on the third compute node as having
# GENEVE offload enabled
tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
# Doesn't really make a whole lot of logical sense, but allows
# us to test situations where the same trait is associated with
# multiple providers in the same tree and one of the providers
# has inventory we will use...
tb.set_traits(cn, os_traits.HW_NIC_OFFLOAD_GENEVE)
# First, we test that all the candidates are returned
expected_trees = ['cn1', 'cn2', 'cn3']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1',
'cn2', 'cn2_numa0_pf0', 'cn2_numa1_pf1',
'cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps)
# Let's see if the tree_root_id filter works
expected_trees = ['cn1']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1']
_run_test(expected_trees, expected_rps, in_tree=uuids.cn1)
# Let's see if the aggregate filter works
# 1. rps in agg1
# All rps under cn1 should be included because aggregate on a root
# spans the whole tree
member_of = [[uuids.agg1]]
expected_trees = ['cn1']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1']
_run_test(expected_trees, expected_rps, member_of=member_of)
# 2. rps in agg2
        # cn2 doesn't come up because, while cn2_numa1_pf1 is in agg2, aggs on
        # a non-root provider do NOT span the whole tree. Thus cn2 can't
        # provide the VCPU or MEMORY_MB resources
member_of = [[uuids.agg2]]
expected_trees = ['cn1', 'cn3']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1',
'cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps, member_of=member_of)
# 3. rps in agg1 or agg3
        # cn1 in agg1 and cn3 in agg3 come up
member_of = [[uuids.agg1, uuids.agg3]]
expected_trees = ['cn1', 'cn3']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1',
'cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps, member_of=member_of)
# 4. rps in (agg1 or agg2) and (agg3)
# cn1 is not in agg3
member_of = [[uuids.agg1, uuids.agg2], [uuids.agg3]]
expected_trees = ['cn3']
expected_rps = ['cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps, member_of=member_of)
# 5. rps not in agg1
# All rps under cn1 are excluded
forbidden_aggs = [uuids.agg1]
expected_trees = ['cn2', 'cn3']
expected_rps = ['cn2', 'cn2_numa0_pf0', 'cn2_numa1_pf1',
'cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps, forbidden_aggs=forbidden_aggs)
# 6. rps not in agg2
# All rps under cn1, under cn3 and pf1 on cn2 are excluded
forbidden_aggs = [uuids.agg2]
expected_trees = ['cn2']
expected_rps = ['cn2', 'cn2_numa0_pf0']
_run_test(expected_trees, expected_rps, forbidden_aggs=forbidden_aggs)
# 7. rps neither in agg1 nor in agg4
# All rps under cn1 and pf1 on cn3 are excluded
forbidden_aggs = [uuids.agg1, uuids.agg4]
expected_trees = ['cn2', 'cn3']
expected_rps = ['cn2', 'cn2_numa0_pf0', 'cn2_numa1_pf1',
'cn3', 'cn3_numa0_pf0']
_run_test(expected_trees, expected_rps, forbidden_aggs=forbidden_aggs)
# 8. rps in agg3 and neither in agg1 nor in agg4
# cn2 is not in agg3 so excluded
member_of = [[uuids.agg3]]
forbidden_aggs = [uuids.agg1, uuids.agg4]
expected_trees = ['cn3']
expected_rps = ['cn3', 'cn3_numa0_pf0']
_run_test(expected_trees, expected_rps, member_of=member_of,
forbidden_aggs=forbidden_aggs)
# 9. rps in agg1 or agg3 and not in agg3
# ...which means rps in agg1 but not in agg3
member_of = [[uuids.agg1, uuids.agg3]]
forbidden_aggs = [uuids.agg3]
expected_trees = ['cn1']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1']
_run_test(expected_trees, expected_rps, member_of=member_of,
forbidden_aggs=forbidden_aggs)
# 10. rps in agg1 and not in agg1
# ...which results in no rp
member_of = [[uuids.agg1]]
forbidden_aggs = [uuids.agg1]
expected_trees = []
expected_rps = []
_run_test(expected_trees, expected_rps, member_of=member_of,
forbidden_aggs=forbidden_aggs)
# OK, now consume all the VFs in the second compute node and verify
# only the first and third computes are returned as root providers from
# get_trees_matching_all()
cn2_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn2_numa0_pf0)
self.allocate_from_provider(cn2_pf0, orc.SRIOV_NET_VF, 8)
cn2_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn2_numa1_pf1)
self.allocate_from_provider(cn2_pf1, orc.SRIOV_NET_VF, 8)
# cn2 had all its VFs consumed, so we should only get cn1 and cn3's IDs
# as the root provider IDs.
expected_trees = ['cn1', 'cn3']
expected_rps = ['cn1', 'cn1_numa0_pf0', 'cn1_numa1_pf1',
'cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps)
# OK, now we're going to add a required trait to the mix. The only
# provider that is decorated with the HW_NIC_OFFLOAD_GENEVE trait is
# the second physical function on the third compute host. So we should
# only get the third compute node back if we require that trait
geneve_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE)
# required_traits parameter is a dict of trait name to internal ID
req_traits = {
geneve_t.name: geneve_t.id,
}
expected_trees = ['cn3']
# NOTE(tetsuro): Actually we also get providers without traits here.
# This is reported as bug#1771707 and from users' view the bug is now
        # fixed outside of this get_trees_matching_all() function by checking
# traits later again in _check_traits_for_alloc_request().
# But ideally, we'd like to have only pf1 from cn3 here using SQL
# query in get_trees_matching_all() function for optimization.
# provider_names = ['cn3', 'cn3_numa1_pf1']
expected_rps = ['cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps, required_traits=req_traits)
# Add in a required trait that no provider has associated with it and
# verify that there are no returned allocation candidates
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
# required_traits parameter is a dict of trait name to internal ID
req_traits = {
geneve_t.name: geneve_t.id,
avx2_t.name: avx2_t.id,
}
_run_test([], [], required_traits=req_traits)
# If we add the AVX2 trait as forbidden, not required, then we
# should get back the original cn3
req_traits = {
geneve_t.name: geneve_t.id,
}
forbidden_traits = {
avx2_t.name: avx2_t.id,
}
expected_trees = ['cn3']
# NOTE(tetsuro): Actually we also get providers without traits here.
# This is reported as bug#1771707 and from users' view the bug is now
        # fixed outside of this get_trees_matching_all() function by checking
# traits later again in _check_traits_for_alloc_request().
# But ideally, we'd like to have only pf1 from cn3 here using SQL
# query in get_trees_matching_all() function for optimization.
# provider_names = ['cn3', 'cn3_numa1_pf1']
expected_rps = ['cn3', 'cn3_numa0_pf0', 'cn3_numa1_pf1']
_run_test(expected_trees, expected_rps,
required_traits=req_traits,
forbidden_traits=forbidden_traits)
# Consume all the VFs in first and third compute nodes and verify
# no more providers are returned
cn1_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn1_numa0_pf0)
self.allocate_from_provider(cn1_pf0, orc.SRIOV_NET_VF, 8)
cn1_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn1_numa1_pf1)
self.allocate_from_provider(cn1_pf1, orc.SRIOV_NET_VF, 8)
cn3_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn3_numa0_pf0)
self.allocate_from_provider(cn3_pf0, orc.SRIOV_NET_VF, 8)
cn3_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn3_numa1_pf1)
self.allocate_from_provider(cn3_pf1, orc.SRIOV_NET_VF, 8)
_run_test([], [], required_traits=req_traits,
forbidden_traits=forbidden_traits)
def _make_trees_with_traits(self):
        # We are setting up 7 trees of providers with the following traits:
#
# compute node (cn)
# / \
# pf 0 pf 1
#
# +-----+----------------+---------------------+---------------------+
# | | cn | pf0 | pf1 |
# +-----+----------------+---------------------+---------------------+
# |tree1|HW_CPU_X86_AVX2 | |HW_NIC_OFFLOAD_GENEVE|
# +-----+----------------+---------------------+---------------------+
# |tree2|STORAGE_DISK_SSD| | |
# +-----+----------------+---------------------+---------------------+
# |tree3|HW_CPU_X86_AVX2 | | |
# | |STORAGE_DISK_SSD| | |
# +-----+----------------+---------------------+---------------------+
# |tree4| |HW_NIC_ACCEL_SSL | |
# | | |HW_NIC_OFFLOAD_GENEVE| |
# +-----+----------------+---------------------+---------------------+
# |tree5| |HW_NIC_ACCEL_SSL |HW_NIC_OFFLOAD_GENEVE|
# +-----+----------------+---------------------+---------------------+
# |tree6| |HW_NIC_ACCEL_SSL |HW_NIC_ACCEL_SSL |
# +-----+----------------+---------------------+---------------------+
# |tree7| | | |
# +-----+----------------+---------------------+---------------------+
#
rp_ids = set()
for x in ('1', '2', '3', '4', '5', '6', '7'):
name = 'cn' + x
cn = self._create_provider(name)
name = 'cn' + x + '_pf0'
pf0 = self._create_provider(name, parent=cn.uuid)
name = 'cn' + x + '_pf1'
pf1 = self._create_provider(name, parent=cn.uuid)
rp_ids |= set([cn.id, pf0.id, pf1.id])
if x == '1':
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
if x == '2':
tb.set_traits(cn, os_traits.STORAGE_DISK_SSD)
if x == '3':
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2,
os_traits.STORAGE_DISK_SSD)
if x == '4':
tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL,
os_traits.HW_NIC_OFFLOAD_GENEVE)
if x == '5':
tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL)
tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
if x == '6':
tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL)
tb.set_traits(pf1, os_traits.HW_NIC_ACCEL_SSL)
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
ssd_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.STORAGE_DISK_SSD)
geneve_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE)
ssl_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_NIC_ACCEL_SSL)
return rp_ids, avx2_t, ssd_t, geneve_t, ssl_t
def test_get_trees_with_traits(self):
"""Creates a few provider trees having different traits and tests the
_get_trees_with_traits() utility function to ensure that only the
root provider IDs of matching traits are returned.
"""
rp_ids, avx2_t, ssd_t, geneve_t, ssl_t = self._make_trees_with_traits()
# Case1: required on root
required_traits = {
avx2_t.name: avx2_t.id,
}
forbidden_traits = {}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn1', 'cn3']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case1': required on root with forbidden traits
        # Let's validate that cn3 disappears
required_traits = {
avx2_t.name: avx2_t.id,
}
forbidden_traits = {
ssd_t.name: ssd_t.id,
}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn1']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case2: multiple required on root
required_traits = {
avx2_t.name: avx2_t.id,
ssd_t.name: ssd_t.id
}
forbidden_traits = {}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn3']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case3: required on child
required_traits = {
geneve_t.name: geneve_t.id
}
forbidden_traits = {}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn1', 'cn4', 'cn5']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case3': required on child with forbidden traits
        # Let's validate that cn4 disappears
required_traits = {
geneve_t.name: geneve_t.id
}
forbidden_traits = {
ssl_t.name: ssl_t.id
}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn1', 'cn5']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case4: multiple required on child
required_traits = {
geneve_t.name: geneve_t.id,
ssl_t.name: ssl_t.id
}
forbidden_traits = {}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn4', 'cn5']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
# Case5: required on root and child
required_traits = {
avx2_t.name: avx2_t.id,
geneve_t.name: geneve_t.id
}
forbidden_traits = {}
rp_tuples_with_trait = res_ctx._get_trees_with_traits(
self.ctx, rp_ids, required_traits, forbidden_traits)
tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
provider_names = ['cn1']
expect_root_ids = self._get_rp_ids_matching_names(provider_names)
self.assertEqual(expect_root_ids, tree_root_ids)
def test_get_roots_with_traits(self):
_, avx2_t, ssd_t, geneve_t, ssl_t = self._make_trees_with_traits()
def do_test(required=None, forbidden=None, expected=None):
actual = res_ctx._get_roots_with_traits(
self.ctx,
set(trait.id for trait in required or []),
set(trait.id for trait in forbidden or []))
if expected:
expected = self._get_rp_ids_matching_names(
'cn%d' % d for d in expected)
self.assertEqual(expected or set(), actual)
# One of required/forbidden must be specified
self.assertRaises(ValueError, do_test)
# AVX2 is on cn1 and cn3
do_test(required=[avx2_t], expected=(1, 3))
# Multiple required
do_test(required=[avx2_t, ssd_t], expected=(3,))
# No match on roots for a trait on children
do_test(required=[geneve_t])
# ...even if including a trait also on roots
do_test(required=[geneve_t, ssd_t])
# Forbid traits not on any roots. These are on non-root providers...
do_test(forbidden=[geneve_t, ssl_t], expected=(1, 2, 3, 4, 5, 6, 7))
# ...and this one is nowhere in the environment.
hdd_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.STORAGE_DISK_HDD)
do_test(forbidden=[hdd_t], expected=(1, 2, 3, 4, 5, 6, 7))
# Forbid traits just on roots
do_test(forbidden=[avx2_t, ssd_t], expected=(4, 5, 6, 7))
# Forbid traits on roots and children
do_test(forbidden=[ssd_t, ssl_t, geneve_t], expected=(1, 4, 5, 6, 7))
# Required & forbidden both on roots
do_test(required=[avx2_t], forbidden=[ssd_t], expected=(1,))
# Same, but adding forbidden not on roots has no effect
do_test(required=[avx2_t], forbidden=[ssd_t, ssl_t], expected=(1,))
# Required on roots, forbidden only on children
do_test(
required=[avx2_t, ssd_t], forbidden=[ssl_t, geneve_t],
expected=(3,))
# Required & forbidden overlap. No results because it is impossible for
# one provider to both have and not have a trait. (Unreachable in real
# life due to conflict check in the handler.)
do_test(required=[avx2_t, ssd_t], forbidden=[ssd_t, geneve_t])
class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
"""Tests a variety of scenarios with both shared and non-shared resource
providers that the AllocationCandidates.get_by_requests() method returns a
set of alternative allocation requests and provider summaries that may be
used by the scheduler to sort/weigh the options it has for claiming
resources against providers.
"""
def setUp(self):
super(AllocationCandidatesTestCase, self).setUp()
self.requested_resources = {
orc.VCPU: 1,
orc.MEMORY_MB: 64,
orc.DISK_GB: 1500,
}
# For debugging purposes, populated by _create_provider and used by
# _validate_allocation_requests to make failure results more readable.
self.rp_uuid_to_name = {}
def _get_allocation_candidates(self, groups=None, rqparams=None):
if groups is None:
groups = {'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources)}
if rqparams is None:
rqparams = placement_lib.RequestWideParams()
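        # Callers either rely on the defaults above (a single unsuffixed
        # RequestGroup built from self.requested_resources) or pass an
        # explicit dict mapping group suffix -> RequestGroup; see the
        # individual tests below for examples.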
return ac_obj.AllocationCandidates.get_by_requests(
self.ctx, groups, rqparams)
def _mappings_to_suffix(self, mappings):
"""Turn a dict of AllocationRequest mappings keyed on suffix to
a dict, keyed by uuid, of lists of suffixes.
"""
suffixes_by_uuid = collections.defaultdict(set)
for suffix, rps in mappings.items():
for rp_uuid in rps:
suffixes_by_uuid[rp_uuid].add(suffix)
listed_sorted_suffixes = {}
for rp_uuid, suffixes in suffixes_by_uuid.items():
listed_sorted_suffixes[rp_uuid] = sorted(list(suffixes))
return listed_sorted_suffixes
def _validate_allocation_requests(self, expected, candidates,
expect_suffixes=False):
"""Assert correctness of allocation requests in allocation candidates.
This is set up to make it easy for the caller to specify the expected
result, to make that expected structure readable for someone looking at
the test case, and to make test failures readable for debugging.
:param expected: A list of lists of tuples representing the expected
allocation requests, of the form:
[
[(resource_provider_name, resource_class_name, resource_count),
...,
],
...
]
:param candidates: The result from AllocationCandidates.get_by_requests
:param expect_suffixes: If True, validate the AllocationRequest
mappings in the results, found as a list of
                                suffixes in the 4th member of the tuple described
above.
"""
# Extract/convert allocation requests from candidates
observed = []
for ar in candidates.allocation_requests:
suffixes_by_uuid = self._mappings_to_suffix(ar.mappings)
rrs = []
for rr in ar.resource_requests:
req_tuple = (self.rp_uuid_to_name[rr.resource_provider.uuid],
rr.resource_class, rr.amount)
if expect_suffixes:
req_tuple = (
req_tuple +
(suffixes_by_uuid[rr.resource_provider.uuid], ))
rrs.append(req_tuple)
rrs.sort()
observed.append(rrs)
observed.sort()
# Sort the guts of the expected structure
for rr in expected:
rr.sort()
expected.sort()
# Now we ought to be able to compare them
self.assertEqual(expected, observed)
def _validate_provider_summary_resources(self, expected, candidates):
"""Assert correctness of the resources in provider summaries in
allocation candidates.
This is set up to make it easy for the caller to specify the expected
result, to make that expected structure readable for someone looking at
the test case, and to make test failures readable for debugging.
:param expected: A dict, keyed by resource provider name, of sets of
3-tuples containing resource class, capacity, and
amount used:
{ resource_provider_name: set([
(resource_class, capacity, used),
...,
]),
...,
}
:param candidates: The result from AllocationCandidates.get_by_requests
"""
observed = {}
for psum in candidates.provider_summaries:
rpname = self.rp_uuid_to_name[psum.resource_provider.uuid]
reslist = set()
for res in psum.resources:
reslist.add((res.resource_class, res.capacity, res.used))
if rpname in observed:
self.fail("Found resource provider %s more than once in "
"provider_summaries!" % rpname)
observed[rpname] = reslist
# Now we ought to be able to compare them
self.assertEqual(expected, observed)
def _validate_provider_summary_traits(self, expected, candidates):
"""Assert correctness of the traits in provider summaries in allocation
candidates.
This is set up to make it easy for the caller to specify the expected
result, to make that expected structure readable for someone looking at
the test case, and to make test failures readable for debugging.
:param expected: A dict, keyed by resource provider name, of sets of
string trait names:
{ resource_provider_name: set([
trait_name, ...
]),
...,
}
:param candidates: The result from AllocationCandidates.get_by_requests
"""
observed = {}
for psum in candidates.provider_summaries:
rpname = self.rp_uuid_to_name[psum.resource_provider.uuid]
observed[rpname] = set(psum.traits)
self.assertEqual(expected, observed)
def test_unknown_traits(self):
missing = set(['UNKNOWN_TRAIT'])
requests = {'': placement_lib.RequestGroup(
use_same_provider=False, resources=self.requested_resources,
required_traits=missing)}
self.assertRaises(
exception.TraitNotFound,
ac_obj.AllocationCandidates.get_by_requests,
self.ctx, requests, placement_lib.RequestWideParams())
def test_allc_req_and_prov_summary(self):
"""Simply test with one resource provider that the allocation
requests returned by AllocationCandidates have valid
allocation_requests and provider_summaries.
"""
cn1 = self._create_provider('cn1')
tb.add_inventory(cn1, orc.VCPU, 8)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
tb.add_inventory(cn1, orc.DISK_GB, 2000)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 1
}
)}
)
expected = [
[('cn1', orc.VCPU, 1, [''])]
]
self._validate_allocation_requests(
expected, alloc_cands, expect_suffixes=True)
expected = {
'cn1': set([
(orc.VCPU, 8, 0),
(orc.MEMORY_MB, 2048, 0),
(orc.DISK_GB, 2000, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_all_local(self):
"""Create some resource providers that can satisfy the request for
resources with local (non-shared) resources and verify that the
allocation requests returned by AllocationCandidates correspond with
each of these resource providers.
"""
# Create three compute node providers with VCPU, RAM and local disk
cn1, cn2, cn3 = (self._create_provider(name)
for name in ('cn1', 'cn2', 'cn3'))
for cn in (cn1, cn2, cn3):
tb.add_inventory(cn, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(cn, orc.MEMORY_MB, 32768,
min_unit=64, step_size=64, allocation_ratio=1.5)
total_gb = 1000 if cn.name == 'cn3' else 2000
tb.add_inventory(cn, orc.DISK_GB, total_gb,
reserved=100, min_unit=10, step_size=10,
allocation_ratio=1.0)
# Ask for the alternative designer_family possibilities and verify each
# provider is returned
alloc_cands = self._get_allocation_candidates()
# Verify the provider summary information indicates 0 usage and
# capacity calculated from above inventory numbers for the first two
# compute nodes. The third doesn't show up because it lacks sufficient
# disk capacity.
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 32768 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 32768 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
# resources in each allocation request, one each for VCPU, RAM, and
# disk. The amounts of the requests should correspond to the requested
# resource amounts in the filter:resources dict passed to
# AllocationCandidates.get_by_requests().
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('cn1', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('cn2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Now let's add traits into the mix. Currently, none of the compute
# nodes has the AVX2 trait associated with it, so we should get 0
# results if we required AVX2
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
)},
)
self._validate_allocation_requests([], alloc_cands)
# If we then associate the AVX2 trait to just compute node 2, we should
# get back just that compute node in the provider summaries
tb.set_traits(cn2, 'HW_CPU_X86_AVX2')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
)},
)
# Only cn2 should be in our allocation requests now since that's the
# only one with the required trait
expected = [
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('cn2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
p_sums = alloc_cands.provider_summaries
self.assertEqual(1, len(p_sums))
expected = {
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 32768 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn2': set(['HW_CPU_X86_AVX2'])
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Confirm that forbidden traits changes the results to get cn1.
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
forbidden_traits=set([os_traits.HW_CPU_X86_AVX2])
)},
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('cn1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
def test_all_local_limit(self):
"""Create some resource providers that can satisfy the request for
resources with local (non-shared) resources, limit them, and verify
that the allocation requests returned by AllocationCandidates
correspond with each of these resource providers.
"""
# Create three compute node providers with VCPU, RAM and local disk
for name in ('cn1', 'cn2', 'cn3'):
cn = self._create_provider(name)
tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
tb.add_inventory(cn, orc.MEMORY_MB, 32768,
min_unit=64, step_size=64, allocation_ratio=1.5)
total_gb = 1000 if name == 'cn3' else 2000
tb.add_inventory(cn, orc.DISK_GB, total_gb,
reserved=100, min_unit=10, step_size=10,
allocation_ratio=1.0)
# Ask for just one candidate.
limit = 1
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(limit, len(allocation_requests))
# provider summaries should have only one rp
self.assertEqual(limit, len(alloc_cands.provider_summaries))
# Do it again, with conf set to randomize. We can't confirm the
        # randomness but we can be sure the code path doesn't explode.
self.conf_fixture.config(randomize_allocation_candidates=True,
group='designer_family')
# Ask for two candidates.
limit = 2
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(limit, len(allocation_requests))
# provider summaries should have two rps
self.assertEqual(limit, len(alloc_cands.provider_summaries))
# Do it again, asking for more than are available.
limit = 5
# We still only expect 2 because cn3 does not match default requests.
expected_length = 2
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(expected_length, len(allocation_requests))
# provider summaries should have two rps
self.assertEqual(expected_length, len(alloc_cands.provider_summaries))
def test_local_with_shared_disk(self):
"""Create some resource providers that can satisfy the request for
resources with local VCPU and MEMORY_MB but rely on a shared storage
pool to satisfy DISK_GB and verify that the allocation requests
returned by AllocationCandidates have DISK_GB served up by the shared
storage pool resource provider and VCPU/MEMORY_MB by the compute node
providers
"""
# Create two compute node providers with VCPU, RAM and NO local disk,
# associated with the aggregate.
cn1, cn2 = (self._create_provider(name, uuids.agg)
for name in ('cn1', 'cn2'))
for cn in (cn1, cn2):
tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
tb.add_inventory(cn, orc.MEMORY_MB, 1024,
min_unit=64, allocation_ratio=1.5)
        # Create the shared storage pool, associated with the same aggregate
ss = self._create_provider('shared storage', uuids.agg)
# Give the shared storage pool some inventory of DISK_GB
tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, min_unit=10)
# Mark the shared storage pool as having inventory shared among any
# provider associated via aggregate
tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
# Ask for the alternative designer_family possibilities and verify each
# compute node provider is listed in the allocation requests as well as
# the shared storage pool provider
alloc_cands = self._get_allocation_candidates()
# Verify the provider summary information indicates 0 usage and
# capacity calculated from above inventory numbers for both compute
# nodes and the shared provider.
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'shared storage': set([
(orc.DISK_GB, 2000 - 100, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
# resources in each allocation request, one each for VCPU, RAM, and
# disk. The amounts of the requests should correspond to the requested
# resource amounts in the filter:resources dict passed to
# AllocationCandidates.get_by_requests(). The providers for VCPU and
# MEMORY_MB should be the compute nodes while the provider for the
# DISK_GB should be the shared storage pool
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Test for bug #1705071. We query for allocation candidates with a
# request for ONLY the DISK_GB (the resource that is shared with
# compute nodes) and no VCPU/MEMORY_MB. Before the fix for bug
# #1705071, this resulted in a KeyError
alloc_cands = self._get_allocation_candidates(
groups={'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
}
)}
)
# We should only have provider summary information for the sharing
# storage provider, since that's the only provider that can be
# allocated against for this request. In the future, we may look into
# returning the shared-with providers in the provider summaries, but
# that's a distant possibility.
expected = {
'shared storage': set([
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# The allocation_requests will only include the shared storage
# provider because the only thing we're requesting to allocate is
# against the provider of DISK_GB, which happens to be the shared
# storage provider.
expected = [[('shared storage', orc.DISK_GB, 10)]]
self._validate_allocation_requests(expected, alloc_cands)
# Now we're going to add a set of required traits into the request mix.
# To start off, let's request a required trait that we know has not
# been associated yet with any provider, and ensure we get no results
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)}
)
# We have not yet associated the AVX2 trait to any provider, so we
# should get zero allocation candidates
p_sums = alloc_cands.provider_summaries
self.assertEqual(0, len(p_sums))
# Now, if we then associate the required trait with both of our compute
# nodes, we should get back both compute nodes since they both now
# satisfy the required traits as well as the resource request
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
cn1.set_traits([avx2_t])
cn2.set_traits([avx2_t])
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)}
)
# There should be 2 compute node providers and 1 shared storage
# provider in the summaries.
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'shared storage': set([
(orc.DISK_GB, 2000 - 100, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Let's check that the traits listed for the compute nodes include the
# AVX2 trait, and the shared storage provider in the provider summaries
# does NOT have the AVX2 trait.
expected = {
'cn1': set(['HW_CPU_X86_AVX2']),
'cn2': set(['HW_CPU_X86_AVX2']),
'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Forbid the AVX2 trait
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
forbidden_traits=set([os_traits.HW_CPU_X86_AVX2]),
)}
)
# Should be no results as both cn1 and cn2 have the trait.
expected = []
self._validate_allocation_requests(expected, alloc_cands)
# Require the AVX2 trait but forbid CUSTOM_EXTRA_FASTER, which is
# added to cn2
tb.set_traits(cn2, 'CUSTOM_EXTRA_FASTER')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
forbidden_traits=set(['CUSTOM_EXTRA_FASTER']),
)}
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Add disk to cn1, forbid sharing, and require the AVX2 trait.
# This should result in getting only cn1.
tb.add_inventory(cn1, orc.DISK_GB, 2048, allocation_ratio=1.5)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
forbidden_traits=set(['MISC_SHARES_VIA_AGGREGATE']),
)}
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('cn1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
def test_local_with_shared_custom_resource(self):
"""Create some resource providers that can satisfy the request for
resources with local VCPU and MEMORY_MB but rely on a shared resource
provider to satisfy a custom resource requirement and verify that the
allocation requests returned by AllocationCandidates have the custom
resource served up by the shared custom resource provider and
VCPU/MEMORY_MB by the compute node providers
"""
# The aggregate that will be associated to everything...
agg_uuid = uuids.agg
# Create two compute node providers with VCPU, RAM and NO local
# CUSTOM_MAGIC resources, associated with the aggregate.
for name in ('cn1', 'cn2'):
cn = self._create_provider(name, agg_uuid)
tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
tb.add_inventory(cn, orc.MEMORY_MB, 1024,
min_unit=64, allocation_ratio=1.5)
# Create a custom resource called MAGIC
magic_rc = rc_obj.ResourceClass(
self.ctx,
name='CUSTOM_MAGIC',
)
magic_rc.create()
# Create the shared provider that serves CUSTOM_MAGIC, associated with
# the same aggregate
magic_p = self._create_provider('shared custom resource provider',
agg_uuid)
tb.add_inventory(magic_p, magic_rc.name, 2048, reserved=1024,
min_unit=10)
# Mark the magic provider as having inventory shared among any provider
# associated via aggregate
tb.set_traits(magic_p, "MISC_SHARES_VIA_AGGREGATE")
# The resources we will request
requested_resources = {
orc.VCPU: 1,
orc.MEMORY_MB: 64,
magic_rc.name: 512,
}
alloc_cands = self._get_allocation_candidates(
groups={'': placement_lib.RequestGroup(
use_same_provider=False, resources=requested_resources)})
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
# resources in each allocation request, one each for VCPU, RAM, and
# MAGIC. The amounts of the requests should correspond to the requested
# resource amounts in the filter:resources dict passed to
# AllocationCandidates.get_by_requests(). The providers for VCPU and
# MEMORY_MB should be the compute nodes while the provider for the
# MAGIC should be the shared custom resource provider.
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared custom resource provider', magic_rc.name, 512)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('shared custom resource provider', magic_rc.name, 512)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'shared custom resource provider': set([
(magic_rc.name, 1024, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_mix_local_and_shared(self):
# Create three compute node providers with VCPU and RAM, but only
# the third compute node has DISK. The first two computes will
# share the storage from the shared storage pool.
cn1, cn2 = (self._create_provider(name, uuids.agg)
for name in ('cn1', 'cn2'))
# cn3 is not associated with the aggregate
cn3 = self._create_provider('cn3')
for cn in (cn1, cn2, cn3):
tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
tb.add_inventory(cn, orc.MEMORY_MB, 1024,
min_unit=64, allocation_ratio=1.5)
# Only cn3 has disk
tb.add_inventory(cn3, orc.DISK_GB, 2000, reserved=100, min_unit=10)
# Create the shared storage pool in the same aggregate as the first two
# compute nodes
ss = self._create_provider('shared storage', uuids.agg)
# Give the shared storage pool some inventory of DISK_GB
tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, min_unit=10)
tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
alloc_cands = self._get_allocation_candidates()
# Expect cn1, cn2, cn3 and ss in the summaries
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn3': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
'shared storage': set([
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Expect three allocation requests: (cn1, ss), (cn2, ss), (cn3)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn3', orc.VCPU, 1),
('cn3', orc.MEMORY_MB, 64),
('cn3', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Now we're going to add a set of required traits into the request mix.
# To start off, let's request a required trait that we know has not
# been associated yet with any provider, and ensure we get no results
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)}
)
# We have not yet associated the AVX2 trait to any provider, so we
# should get zero allocation candidates
p_sums = alloc_cands.provider_summaries
self.assertEqual(0, len(p_sums))
a_reqs = alloc_cands.allocation_requests
self.assertEqual(0, len(a_reqs))
# Now, if we then associate the required trait with all of our compute
# nodes, we should get back all compute nodes since they all now
# satisfy the required traits as well as the resource request
for cn in (cn1, cn2, cn3):
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
alloc_cands = self._get_allocation_candidates(
groups={'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)}
)
# There should be 3 compute node providers and 1 shared storage
# provider in the summaries.
expected = {
'cn1': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
]),
'cn3': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
'shared storage': set([
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Let's check that the traits listed for the compute nodes include the
# AVX2 trait, and the shared storage provider in the provider summaries
# does NOT have the AVX2 trait
expected = {
'cn1': set(['HW_CPU_X86_AVX2']),
'cn2': set(['HW_CPU_X86_AVX2']),
'cn3': set(['HW_CPU_X86_AVX2']),
'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Now, let's add a new wrinkle to the equation and add a required trait
# that will ONLY be satisfied by a compute node with local disk that
# has SSD drives. Set this trait only on the compute node with local
# disk (cn3)
tb.set_traits(cn3, os_traits.HW_CPU_X86_AVX2,
os_traits.STORAGE_DISK_SSD)
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([
os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
]),
)
})
# There should be only cn3 in the returned allocation candidates
expected = [
[('cn3', orc.VCPU, 1),
('cn3', orc.MEMORY_MB, 64),
('cn3', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn3': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn3': set(['HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'])
}
self._validate_provider_summary_traits(expected, alloc_cands)
def test_common_rc(self):
"""Candidates when cn and shared have inventory in the same class."""
cn = self._create_provider('cn', uuids.agg1)
tb.add_inventory(cn, orc.VCPU, 24)
tb.add_inventory(cn, orc.MEMORY_MB, 2048)
tb.add_inventory(cn, orc.DISK_GB, 1600)
ss = self._create_provider('ss', uuids.agg1)
tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss, orc.DISK_GB, 2000)
alloc_cands = self._get_allocation_candidates()
# One allocation_request should have cn + ss; the other should have
# just the cn.
expected = [
[('cn', orc.VCPU, 1),
('cn', orc.MEMORY_MB, 64),
('cn', orc.DISK_GB, 1500)],
[('cn', orc.VCPU, 1),
('cn', orc.MEMORY_MB, 64),
('ss', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss': set([
(orc.DISK_GB, 2000, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Next let's increase the requested DISK_GB
requested_resources = {
orc.VCPU: 1,
orc.MEMORY_MB: 64,
orc.DISK_GB: 1800,
}
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=requested_resources,
)}
)
expected = [
[('cn', orc.VCPU, 1),
('cn', orc.MEMORY_MB, 64),
('ss', orc.DISK_GB, 1800)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss': set([
(orc.DISK_GB, 2000, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_common_rc_traits_split(self):
"""Validate filters when traits are split across cn and shared RPs."""
# NOTE(efried): This test case only applies to the scenario where we're
# requesting resources via the RequestGroup where
# use_same_provider=False
cn = self._create_provider('cn', uuids.agg1)
tb.add_inventory(cn, orc.VCPU, 24)
tb.add_inventory(cn, orc.MEMORY_MB, 2048)
tb.add_inventory(cn, orc.DISK_GB, 1600)
# The compute node's disk is SSD
tb.set_traits(cn, 'HW_CPU_X86_SSE', 'STORAGE_DISK_SSD')
ss = self._create_provider('ss', uuids.agg1)
tb.add_inventory(ss, orc.DISK_GB, 1600)
# The shared storage's disk is RAID
tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_RAID')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set(['HW_CPU_X86_SSE', 'STORAGE_DISK_SSD',
'CUSTOM_RAID'])
)}
)
# TODO(efried): Bug #1724633: we'd *like* to get no candidates, because
# there's no single DISK_GB resource with both STORAGE_DISK_SSD and
# CUSTOM_RAID traits.
# expected = []
expected = [
[('cn', orc.VCPU, 1),
('cn', orc.MEMORY_MB, 64),
('ss', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# expected = {}
expected = {
'cn': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_only_one_sharing_provider(self):
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24)
tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16)
tb.add_inventory(ss1, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)}
)
expected = [
[('ss1', orc.IPV4_ADDRESS, 2),
('ss1', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)]
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'ss1': set([
(orc.IPV4_ADDRESS, 24, 0),
(orc.SRIOV_NET_VF, 16, 0),
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_all_sharing_providers_no_rc_overlap(self):
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24)
ss2 = self._create_provider('ss2', uuids.agg1)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'DISK_GB': 1500,
}
)}
)
expected = [
[('ss1', orc.IPV4_ADDRESS, 2),
('ss2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'ss1': set([
(orc.IPV4_ADDRESS, 24, 0),
]),
'ss2': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_all_sharing_providers_no_rc_overlap_more_classes(self):
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24)
tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16)
ss2 = self._create_provider('ss2', uuids.agg1)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)}
)
expected = [
[('ss1', orc.IPV4_ADDRESS, 2),
('ss1', orc.SRIOV_NET_VF, 1),
('ss2', orc.DISK_GB, 1500)]
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'ss1': set([
(orc.IPV4_ADDRESS, 24, 0),
(orc.SRIOV_NET_VF, 16, 0)
]),
'ss2': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_all_sharing_providers(self):
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24)
tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16)
tb.add_inventory(ss1, orc.DISK_GB, 1600)
ss2 = self._create_provider('ss2', uuids.agg1)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16)
tb.add_inventory(ss2, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(groups={
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)}
)
# We expect four candidates:
# - gets all the resources from ss1,
# - gets the SRIOV_NET_VF from ss2 and the rest from ss1,
# - gets the DISK_GB from ss2 and the rest from ss1,
# - gets SRIOV_NET_VF and DISK_GB from ss2 and rest from ss1
expected = [
[('ss1', orc.IPV4_ADDRESS, 2),
('ss1', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)],
[('ss1', orc.IPV4_ADDRESS, 2),
('ss1', orc.SRIOV_NET_VF, 1),
('ss2', orc.DISK_GB, 1500)],
[('ss1', orc.IPV4_ADDRESS, 2),
('ss2', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)],
[('ss1', orc.IPV4_ADDRESS, 2),
('ss2', orc.SRIOV_NET_VF, 1),
('ss2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'ss1': set([
(orc.IPV4_ADDRESS, 24, 0),
(orc.SRIOV_NET_VF, 16, 0),
(orc.DISK_GB, 1600, 0)
]),
'ss2': set([
(orc.SRIOV_NET_VF, 16, 0),
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_two_non_sharing_connect_to_one_sharing_different_aggregate(self):
# Covering the following setup:
#
# CN1 (VCPU) CN2 (VCPU)
# \ agg1 / agg2
# SS1 (DISK_GB)
#
        # It is different from test_mix_local_and_shared as it uses two
        # different aggregates to connect the two CNs to the shared RP
cn1 = self._create_provider('cn1', uuids.agg1)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
cn2 = self._create_provider('cn2', uuids.agg2)
tb.add_inventory(cn2, orc.VCPU, 24)
tb.add_inventory(cn2, orc.MEMORY_MB, 2048)
ss1 = self._create_provider('ss1', uuids.agg1, uuids.agg2)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
}
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_two_non_sharing_one_common_and_two_unique_sharing(self):
# Covering the following setup:
#
# CN1 (VCPU) CN2 (VCPU)
# / agg3 \ agg1 / agg1 \ agg2
# SS3 (IPV4) SS1 (DISK_GB) SS2 (IPV4)
cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg3)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
cn2 = self._create_provider('cn2', uuids.agg1, uuids.agg2)
tb.add_inventory(cn2, orc.VCPU, 24)
tb.add_inventory(cn2, orc.MEMORY_MB, 2048)
# ss1 is connected to both cn1 and cn2
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.DISK_GB, 1600)
# ss2 only connected to cn2
ss2 = self._create_provider('ss2', uuids.agg2)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24)
# ss3 only connected to cn1
ss3 = self._create_provider('ss3', uuids.agg3)
tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss3, orc.IPV4_ADDRESS, 24)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
'IPV4_ADDRESS': 2,
}
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500),
('ss3', orc.IPV4_ADDRESS, 2)],
[('cn2', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500),
('ss2', orc.IPV4_ADDRESS, 2)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
'ss2': set([
(orc.IPV4_ADDRESS, 24, 0),
]),
'ss3': set([
(orc.IPV4_ADDRESS, 24, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_rc_not_split_between_sharing_and_non_sharing(self):
# cn1(VCPU,MEM) Non-sharing RP with some of the resources
# | agg1 aggregated with
# ss1(DISK) sharing RP that has the rest of the resources
#
# cn2(VCPU) Non-sharing with one of the resources;
# / agg2 \ aggregated with multiple sharing providers
# ss2_1(MEM) ss2_2(DISK) with different resources.
cn1 = self._create_provider('cn1', uuids.agg1)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
ss1 = self._create_provider('ss1', uuids.agg1)
tb.add_inventory(ss1, orc.DISK_GB, 2000)
tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
cn2 = self._create_provider('cn2', uuids.agg2)
tb.add_inventory(cn2, orc.VCPU, 24)
ss2_1 = self._create_provider('ss2_1', uuids.agg2)
tb.add_inventory(ss2_1, orc.MEMORY_MB, 2048)
tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE')
ss2_2 = self._create_provider('ss2_2', uuids.agg2)
tb.add_inventory(ss2_2, orc.DISK_GB, 2000)
tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE')
alloc_cands = self._get_allocation_candidates()
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('ss1', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('ss2_1', orc.MEMORY_MB, 64),
('ss2_2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'ss1': set([
(orc.DISK_GB, 2000, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
]),
'ss2_1': set([
(orc.MEMORY_MB, 2048, 0),
]),
'ss2_2': set([
(orc.DISK_GB, 2000, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_multiple_sharing_providers_with_same_rc(self):
# cn1(VCPU,MEM) Non-sharing with some of the resources;
# / agg1 \ aggregated with multiple sharing providers
# ss1_1(DISK) ss1_2(DISK) with the same resource.
#
# cn2(VCPU) Non-sharing with one of the resources;
# / agg2 \ aggregated with multiple sharing providers
# ss2_1(MEM) ss2_2(DISK) with different resources.
cn1 = self._create_provider('cn1', uuids.agg1)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
ss1_1 = self._create_provider('ss1_1', uuids.agg1)
tb.add_inventory(ss1_1, orc.DISK_GB, 2000)
tb.set_traits(ss1_1, 'MISC_SHARES_VIA_AGGREGATE')
ss1_2 = self._create_provider('ss1_2', uuids.agg1)
tb.add_inventory(ss1_2, orc.DISK_GB, 2000)
tb.set_traits(ss1_2, 'MISC_SHARES_VIA_AGGREGATE')
cn2 = self._create_provider('cn2', uuids.agg2)
tb.add_inventory(cn2, orc.VCPU, 24)
ss2_1 = self._create_provider('ss2_1', uuids.agg2)
tb.add_inventory(ss2_1, orc.MEMORY_MB, 2048)
tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE')
ss2_2 = self._create_provider('ss2_2', uuids.agg2)
tb.add_inventory(ss2_2, orc.DISK_GB, 2000)
tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE')
alloc_cands = self._get_allocation_candidates()
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('ss1_1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('ss1_2', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('ss2_1', orc.MEMORY_MB, 64),
('ss2_2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'ss1_1': set([
(orc.DISK_GB, 2000, 0),
]),
'ss1_2': set([
(orc.DISK_GB, 2000, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
]),
'ss2_1': set([
(orc.MEMORY_MB, 2048, 0),
]),
'ss2_2': set([
(orc.DISK_GB, 2000, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_sharing_providers_member_of(self):
# Covering the following setup:
#
# CN1 (VCPU, DISK_GB) CN2 (VCPU, DISK_GB)
# / agg1 \ agg2 / agg2 \ agg3
# SS1 (DISK_GB) SS2 (DISK_GB) SS3 (DISK_GB)
cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.DISK_GB, 1600)
cn2 = self._create_provider('cn2', uuids.agg2, uuids.agg3)
tb.add_inventory(cn2, orc.VCPU, 24)
tb.add_inventory(cn2, orc.DISK_GB, 1600)
# ss1 is connected to cn1
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.DISK_GB, 1600)
# ss2 is connected to both cn1 and cn2
ss2 = self._create_provider('ss2', uuids.agg2)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.DISK_GB, 1600)
# ss3 is connected to cn2
ss3 = self._create_provider('ss3', uuids.agg3)
tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss3, orc.DISK_GB, 1600)
# Let's get allocation candidates from agg1
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
},
member_of=[[uuids.agg1]]
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('cn1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Let's get allocation candidates from agg2
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
},
member_of=[[uuids.agg2]]
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('cn1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 2),
('ss2', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 2),
('cn2', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 2),
('ss2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss2': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Let's move to validate multiple member_of scenario
# The request from agg1 *AND* agg2 would provide only
# resources from cn1 with its local DISK
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
},
member_of=[[uuids.agg1], [uuids.agg2]]
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('cn1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
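        # (Editorial note, assuming the standard placement REST semantics:
        # member_of=[[agg1], [agg2]] corresponds to repeating the member_of
        # query parameter, which ANDs the groups, while member_of=[[agg1,
        # agg2]] corresponds to member_of=in:agg1,agg2, i.e. an OR within a
        # single group.)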
# The request from agg1 *OR* agg2 would provide five candidates
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
},
member_of=[[uuids.agg1, uuids.agg2]]
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('cn1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 2),
('ss2', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 2),
('cn2', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 2),
('ss2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
'cn2': set([
(orc.VCPU, 24, 0),
(orc.DISK_GB, 1600, 0),
]),
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
'ss2': set([
(orc.DISK_GB, 1600, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_two_sharing_indirectly_connected_connecting_not_give_resource(
self):
# This covers the following setup
# CN1 (VCPU, MEMORY_MB)
# / \
# /agg1 \agg2
# / \
# SS1 ( SS2 (
# DISK_GB) IPV4_ADDRESS
# SRIOV_NET_VF)
        # The request is then made for resources from the sharing RPs only
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.DISK_GB, 1600)
cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
ss2 = self._create_provider('ss2', uuids.agg2)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24)
tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)}
)
expected = [
[('ss1', orc.DISK_GB, 1500),
('ss2', orc.IPV4_ADDRESS, 2),
('ss2', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
'ss2': set([
(orc.IPV4_ADDRESS, 24, 0),
(orc.SRIOV_NET_VF, 16, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_two_sharing_indirectly_connected_connecting_gives_resource(self):
# This covers the following setup
# CN1 (VCPU, MEMORY_MB)
# / \
# /agg1 \agg2
# / \
# SS1 ( SS2 (
# DISK_GB) IPV4_ADDRESS
# SRIOV_NET_VF)
        # The request is then made for resources from all three RPs
ss1 = self._create_provider('ss1', uuids.agg1)
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, orc.DISK_GB, 1600)
cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2)
tb.add_inventory(cn1, orc.VCPU, 24)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
ss2 = self._create_provider('ss2', uuids.agg2)
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24)
tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)}
)
expected = [
[('cn1', orc.VCPU, 2),
('ss1', orc.DISK_GB, 1500),
('ss2', orc.IPV4_ADDRESS, 2),
('ss2', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 24, 0),
(orc.MEMORY_MB, 2048, 0),
]),
'ss1': set([
(orc.DISK_GB, 1600, 0),
]),
'ss2': set([
(orc.IPV4_ADDRESS, 24, 0),
(orc.SRIOV_NET_VF, 16, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_simple_tree_of_providers(self):
"""Tests that we properly winnow allocation requests when including
traits in the request group and that the traits appear in the provider
summaries of the returned allocation candidates
"""
# We are setting up a single tree that looks like this:
#
# compute node (cn)
# / \
# / \
# numa cell 0 numa cell 1
# | |
# | |
# pf 0 pf 1
#
# The second physical function will be associated with the
# HW_NIC_OFFLOAD_GENEVE trait, but not the first physical function.
#
# We will issue a request to _get_allocation_candidates() for VCPU,
# MEMORY_MB and SRIOV_NET_VF **without** required traits, then include
# a request that includes HW_NIC_OFFLOAD_GENEVE. In the latter case,
# the compute node tree should be returned but the allocation requests
# should only include the second physical function since the required
# trait is only associated with that PF.
#
# Subsequently, we will consume all the SRIOV_NET_VF resources from the
# second PF's inventory and attempt the same request of resources and
# HW_NIC_OFFLOAD_GENEVE. We should get 0 returned results because now
# the only PF that has the required trait has no inventory left.
cn = self._create_provider('cn')
tb.add_inventory(cn, orc.VCPU, 16)
tb.add_inventory(cn, orc.MEMORY_MB, 32768)
numa_cell0 = self._create_provider('cn_numa0', parent=cn.uuid)
numa_cell1 = self._create_provider('cn_numa1', parent=cn.uuid)
pf0 = self._create_provider('cn_numa0_pf0', parent=numa_cell0.uuid)
tb.add_inventory(pf0, orc.SRIOV_NET_VF, 8)
pf1 = self._create_provider('cn_numa1_pf1', parent=numa_cell1.uuid)
tb.add_inventory(pf1, orc.SRIOV_NET_VF, 8)
tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
}
)}
)
expected = [
[('cn', orc.VCPU, 2),
('cn', orc.MEMORY_MB, 256),
('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)],
[('cn', orc.VCPU, 2),
('cn', orc.MEMORY_MB, 256),
('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 16, 0),
(orc.MEMORY_MB, 32768, 0),
]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
'cn_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn': set([]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([]),
'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Now add required traits to the mix and verify we still get the same
# result (since we haven't yet consumed the second physical function's
        # inventory of SRIOV_NET_VF).
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
)}
)
expected = [
[('cn', orc.VCPU, 2),
('cn', orc.MEMORY_MB, 256),
('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 16, 0),
(orc.MEMORY_MB, 32768, 0),
]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
'cn_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn': set([]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([]),
'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Next we test that we get resources only on non-root providers
# without root providers involved
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.SRIOV_NET_VF: 1,
},
)}
)
expected = [
[('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)],
[('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 16, 0),
(orc.MEMORY_MB, 32768, 0),
]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
'cn_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn': set([]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([]),
'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Same, but with the request in a granular group, which hits a
# different code path.
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
},
)}
)
expected = [
[('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)],
[('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn': set([
(orc.VCPU, 16, 0),
(orc.MEMORY_MB, 32768, 0),
]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
'cn_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn': set([]),
'cn_numa0': set([]),
'cn_numa1': set([]),
'cn_numa0_pf0': set([]),
'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Now consume all the inventory of SRIOV_NET_VF on the second physical
# function (the one with HW_NIC_OFFLOAD_GENEVE associated with it) and
# verify that the same request still results in 0 results since the
# function with the required trait no longer has any inventory.
self.allocate_from_provider(pf1, orc.SRIOV_NET_VF, 8)
alloc_cands = self._get_allocation_candidates({
'':
placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
)
})
self._validate_allocation_requests([], alloc_cands)
self._validate_provider_summary_resources({}, alloc_cands)
self._validate_provider_summary_traits({}, alloc_cands)
def test_simple_tree_with_shared_provider(self):
"""Tests that we properly winnow allocation requests when including
shared and nested providers
"""
# We are setting up 2 cn trees with 2 shared storages
# that look like this:
#
# compute node (cn1) ----- shared storage (ss1)
# / \ agg1 with 2000 DISK_GB
# / \
# numa cell 1_0 numa cell 1_1
# | |
# | |
# pf 1_0 pf 1_1(HW_NIC_OFFLOAD_GENEVE)
#
# compute node (cn2) ----- shared storage (ss2)
# / \ agg2 with 1000 DISK_GB
# / \
# numa cell 2_0 numa cell 2_1
# | |
# | |
# pf 2_0 pf 2_1(HW_NIC_OFFLOAD_GENEVE)
#
# The second physical function in both trees (pf1_1, pf 2_1) will be
# associated with the HW_NIC_OFFLOAD_GENEVE trait, but not the first
# physical function.
#
# We will issue a request to _get_allocation_candidates() for VCPU,
# SRIOV_NET_VF and DISK_GB **without** required traits, then include
# a request that includes HW_NIC_OFFLOAD_GENEVE. In the latter case,
# the compute node tree should be returned but the allocation requests
# should only include the second physical function since the required
# trait is only associated with that PF.
cn1 = self._create_provider('cn1', uuids.agg1)
cn2 = self._create_provider('cn2', uuids.agg2)
tb.add_inventory(cn1, orc.VCPU, 16)
tb.add_inventory(cn2, orc.VCPU, 16)
numa1_0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
numa1_1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
numa2_0 = self._create_provider('cn2_numa0', parent=cn2.uuid)
numa2_1 = self._create_provider('cn2_numa1', parent=cn2.uuid)
pf1_0 = self._create_provider('cn1_numa0_pf0', parent=numa1_0.uuid)
pf1_1 = self._create_provider('cn1_numa1_pf1', parent=numa1_1.uuid)
pf2_0 = self._create_provider('cn2_numa0_pf0', parent=numa2_0.uuid)
pf2_1 = self._create_provider('cn2_numa1_pf1', parent=numa2_1.uuid)
tb.add_inventory(pf1_0, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf1_1, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf2_0, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf2_1, orc.SRIOV_NET_VF, 8)
tb.set_traits(pf2_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
tb.set_traits(pf1_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
ss1 = self._create_provider('ss1', uuids.agg1)
ss2 = self._create_provider('ss2', uuids.agg2)
tb.add_inventory(ss1, orc.DISK_GB, 2000)
tb.add_inventory(ss2, orc.DISK_GB, 1000)
tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
tb.set_traits(ss2, 'MISC_SHARES_VIA_AGGREGATE')
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.SRIOV_NET_VF: 1,
orc.DISK_GB: 1500,
})
})
        # cn2 is not in the allocation candidates because its shared storage
        # (ss2) does not have enough DISK_GB for the request.
expected = [
[('cn1', orc.VCPU, 2),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 2),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)]
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 16, 0)
]),
'cn1_numa0': set([]),
'cn1_numa1': set([]),
'cn1_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0)
]),
'cn1_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0)
]),
'ss1': set([
(orc.DISK_GB, 2000, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
        # Now add the required trait to the mix and verify that only the PF
        # with HW_NIC_OFFLOAD_GENEVE remains in the allocation requests.
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
orc.SRIOV_NET_VF: 1,
orc.DISK_GB: 1500,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE])
})
# cn1_numa0_pf0 is not in the allocation candidates because it
# doesn't have the required trait.
expected = [
[('cn1', orc.VCPU, 2),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1),
('ss1', orc.DISK_GB, 1500)]
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn1': set([
(orc.VCPU, 16, 0)
]),
'cn1_numa0': set([]),
'cn1_numa1': set([]),
'cn1_numa0_pf0': set([
(orc.SRIOV_NET_VF, 8, 0)
]),
'cn1_numa1_pf1': set([
(orc.SRIOV_NET_VF, 8, 0)
]),
'ss1': set([
(orc.DISK_GB, 2000, 0)
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def _create_nested_trees(self):
# We are setting up 2 identical compute trees with no storage
# that look like this:
#
# compute node (cn1)
# / \
# / \
# numa cell 1_0 numa cell 1_1
# | |
# | |
# pf 1_0 pf 1_1
#
# compute node (cn2)
# / \
# / \
# numa cell 2_0 numa cell 2_1
# | |
# | |
# pf 2_0 pf 2_1
#
cn1 = self._create_provider('cn1', uuids.agg1)
cn2 = self._create_provider('cn2', uuids.agg2)
tb.add_inventory(cn1, orc.VCPU, 16)
tb.add_inventory(cn2, orc.VCPU, 16)
numa1_0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
numa1_1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
numa2_0 = self._create_provider('cn2_numa0', parent=cn2.uuid)
numa2_1 = self._create_provider('cn2_numa1', parent=cn2.uuid)
pf1_0 = self._create_provider('cn1_numa0_pf0', parent=numa1_0.uuid)
pf1_1 = self._create_provider('cn1_numa1_pf1', parent=numa1_1.uuid)
pf2_0 = self._create_provider('cn2_numa0_pf0', parent=numa2_0.uuid)
pf2_1 = self._create_provider('cn2_numa1_pf1', parent=numa2_1.uuid)
tb.add_inventory(pf1_0, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf1_1, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf2_0, orc.SRIOV_NET_VF, 8)
tb.add_inventory(pf2_1, orc.SRIOV_NET_VF, 8)
def test_nested_result_count_none(self):
"""Tests that we properly winnow allocation requests when including
nested providers from different request groups with group policy none.
"""
self._create_nested_trees()
# Make a granular request to check count of results.
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
}),
'_NET1': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
'_NET2': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
}, rqparams=placement_lib.RequestWideParams(group_policy='none'))
# 4 VF providers each providing 2, 1, or 0 inventory makes 6
# different combinations, plus two more that are effectively
# the same but satisfying different suffix mappings.
self.assertEqual(8, len(alloc_cands.allocation_requests))
def test_nested_result_count_different_amounts_isolate(self):
"""Tests that we properly winnow allocation requests when including
nested providers from different request groups, with different
requested amounts.
"""
self._create_nested_trees()
# Make a granular request to check count of results.
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
}),
'_NET1': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
'_NET2': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 2,
}),
}, rqparams=placement_lib.RequestWideParams(group_policy='isolate'))
self.assertEqual(4, len(alloc_cands.allocation_requests))
def test_nested_result_suffix_mappings(self):
"""Confirm that paying attention to suffix mappings expands
the quantity of results and confirm those results.
"""
self._create_nested_trees()
# Make a granular request to check count and suffixes of results.
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
}),
'_NET1': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
'_NET2': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
}, rqparams=placement_lib.RequestWideParams(group_policy='isolate'))
expected = [
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET1']),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET2'])],
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET2']),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET1'])],
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET1']),
('cn2_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET2'])],
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET2']),
('cn2_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET1'])],
]
        # Near the end of _merge_candidates we expect 4 different collections
# of AllocationRequest to attempt to be added to a set. Admittance is
# controlled by the __hash__ and __eq__ of the AllocationRequest which,
# in this case, should keep the results at 4 since they are defined to
# be different when they have different suffixes even if they have the
# same resource provider, the same resource class and the same desired
# amount.
self.assertEqual(4, len(alloc_cands.allocation_requests))
self._validate_allocation_requests(
expected, alloc_cands, expect_suffixes=True)
def test_nested_result_suffix_mappings_non_isolated(self):
"""Confirm that paying attention to suffix mappings expands
the quantity of results and confirm those results.
"""
self._create_nested_trees()
# Make a granular request to check count and suffixes of results.
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
orc.VCPU: 2,
}),
'_NET1': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
'_NET2': placement_lib.RequestGroup(
use_same_provider=True,
resources={
orc.SRIOV_NET_VF: 1,
}),
}, rqparams=placement_lib.RequestWideParams(group_policy='none'))
# We get four candidates from each compute node:
# [A] Two where one VF comes from each PF+RequestGroup combination.
        # [B] Two where both VFs come from the same PF (which satisfies both
        #     RequestGroups).
expected = [
# [A] (cn1)
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET1']),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET2'])],
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET2']),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET1'])],
# [B] (cn1)
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa0_pf0', orc.SRIOV_NET_VF, 2, ['_NET1', '_NET2'])],
[('cn1', orc.VCPU, 2, ['']),
('cn1_numa1_pf1', orc.SRIOV_NET_VF, 2, ['_NET1', '_NET2'])],
# [A] (cn2)
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET1']),
('cn2_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET2'])],
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa0_pf0', orc.SRIOV_NET_VF, 1, ['_NET2']),
('cn2_numa1_pf1', orc.SRIOV_NET_VF, 1, ['_NET1'])],
# [B] (cn2)
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa0_pf0', orc.SRIOV_NET_VF, 2, ['_NET1', '_NET2'])],
[('cn2', orc.VCPU, 2, ['']),
('cn2_numa1_pf1', orc.SRIOV_NET_VF, 2, ['_NET1', '_NET2'])],
]
self.assertEqual(8, len(alloc_cands.allocation_requests))
self._validate_allocation_requests(
expected, alloc_cands, expect_suffixes=True)
| 40.496341
| 79
| 0.5647
|
07d3a4a000cfe87a42cb705aa06f32d4bef5678d
| 13,417
|
py
|
Python
|
dashlib/dash_tx.py
|
Ecrypty/florijncoinmnb
|
ad51ddcf19fde140a19dddd2691b2c5e248a4197
|
[
"MIT"
] | null | null | null |
dashlib/dash_tx.py
|
Ecrypty/florijncoinmnb
|
ad51ddcf19fde140a19dddd2691b2c5e248a4197
|
[
"MIT"
] | null | null | null |
dashlib/dash_tx.py
|
Ecrypty/florijncoinmnb
|
ad51ddcf19fde140a19dddd2691b2c5e248a4197
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
import re
import binascii
import hashlib
import simplejson as json
from florijncoin_hashs import *
from florijncoin_script import *
def deserialize_script(script):
if isinstance(script, str) and re.match('^[0-9a-fA-F]*$', script):
return json_changebase(deserialize_script(bytes.fromhex(script)),
lambda x: safe_hexlify(x))
# return json_changebase(deserialize_script(binascii.unhexlify(script)),
# lambda x: safe_hexlify(x))
out, pos = [], 0
while pos < len(script):
code = from_byte_to_int(script[pos])
if code == 0:
out.append(None)
pos += 1
elif code <= 75:
out.append(script[pos + 1:pos + 1 + code])
pos += 1 + code
elif code <= 78:
szsz = pow(2, code - 76)
sz = decode(script[pos + szsz: pos:-1], 256)
out.append(script[pos + 1 + szsz:pos + 1 + szsz + sz])
pos += 1 + szsz + sz
elif code <= 96:
out.append(code - 80)
pos += 1
else:
out.append(code)
pos += 1
return out
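# Illustrative decode sketch (editorial addition, not in the original module).
# deserialize_script walks Bitcoin-style opcodes, so a standard P2PKH output
# script splits into its opcode/push sequence; the hex form of the pushed
# bytes assumes safe_hexlify from florijncoin_hashs behaves like
# binascii.hexlify:
#   deserialize_script('76a914' + '00' * 20 + '88ac')
#   -> [118, 169, '00' * 20, 136, 172]
#   (OP_DUP, OP_HASH160, <20-byte pubkey hash>, OP_EQUALVERIFY, OP_CHECKSIG)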
def deserialize(tx):
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
return json_changebase(deserialize(bytes.fromhex(tx)),
lambda x: safe_hexlify(x))
# return json_changebase(deserialize(binascii.unhexlify(tx)),
# lambda x: safe_hexlify(x))
pos = [0]
def read_as_int(bytez):
pos[0] += bytez
return decode(tx[pos[0] - bytez:pos[0]][::-1], 256)
def read_var_int():
pos[0] += 1
val = from_byte_to_int(tx[pos[0] - 1])
if val < 253:
return val
return read_as_int(pow(2, val - 252))
def read_bytes(bytez):
pos[0] += bytez
return tx[pos[0] - bytez:pos[0]]
def read_var_string():
size = read_var_int()
return read_bytes(size)
obj = {"ins": [], "outs": []}
obj["version"] = read_as_int(4)
ins = read_var_int()
for i in range(ins):
obj["ins"].append({
"outpoint": {
"hash": read_bytes(32)[::-1],
"index": read_as_int(4)
},
"script": read_var_string(),
"sequence": read_as_int(4)
})
outs = read_var_int()
for i in range(outs):
obj["outs"].append({
"n": i,
"value": read_as_int(8),
"script": read_var_string()
})
obj["locktime"] = read_as_int(4)
return obj
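# Editorial note (not in the original source): read_var_int implements the
# usual Bitcoin-style compact integer. Values below 253 stand for themselves;
# prefix bytes 253, 254 and 255 are followed by a 2-, 4- or 8-byte
# little-endian integer, which is exactly what read_as_int(pow(2, val - 252))
# reads. For example the bytes fd 2c 01 decode to 300 (0x012c).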
def decoderawtx(rawtx):
txo = deserialize(rawtx)
txid = format_hash(double_sha256(bytes.fromhex(rawtx)))
#txid = format_hash(double_sha256(binascii.unhexlify(rawtx)))
# print(txid)
#print(json.dumps(txo, sort_keys=True, indent=4, separators=(',', ': ')))
addrcheck = {}
addrfromall = []
for x in txo.get('ins'):
hashn = x.get('outpoint')['hash']
if hashn != '0000000000000000000000000000000000000000000000000000000000000000':
des_script = deserialize_script(x.get('script'))
addrn = script_to_addr(des_script)
if (addrn != 'pay_to_pubkey'
and addrn != 'unspendable'
and addrn != 'nulldata'
and addrn != 'invalid'):
addrcheck['good'] = {
"hashin": hashn + '-' + str(x.get('outpoint')['index']),
"addrfrom": addrn
}
addrfromall.append(addrn)
elif (addrn == 'pay_to_pubkey'):
addrcheck['pubkey'] = {
"hashin": hashn + '-' + str(x.get('outpoint')['index']),
"addrfrom": 'pay_to_pubkey'
}
addrfromall.append('pay_to_pubkey')
elif (addrn == 'pay_to_scripthash'):
addrcheck['pubkey'] = {
"hashin": hashn + '-' + str(x.get('outpoint')['index']),
"addrfrom": 'pay_to_scripthash'
}
addrfromall.append('pay_to_scripthash')
else:
addrcheck['coinbase'] = {
"hashin": '0000000000000000000000000000000000000000000000000000000000000000' +
'-' +
str(0),
"addrfrom": 'coinbase'}
addrfromall.append('coinbase')
# use last input
if addrcheck.get('coinbase', None) is not None:
hashin = addrcheck.get('coinbase')['hashin']
addrfrom = addrcheck.get('coinbase')['addrfrom']
if addrcheck.get('pubkey', None) is not None:
hashin = addrcheck.get('pubkey')['hashin']
addrfrom = addrcheck.get('pubkey')['addrfrom']
if addrcheck.get('good', None) is not None:
hashin = addrcheck.get('good')['hashin']
addrfrom = addrcheck.get('good')['addrfrom']
#print(json.dumps(addrcheck, sort_keys=True, indent=4, separators=(',', ': ')))
addrval = {}
for x in txo.get('outs'):
script = x.get('script')
valout = x.get('value')
outno = x.get('n')
value = str('{0:.8f}'.format(float(valout / 1e8)))
addrto = script_to_addr(script)
hashout = txid + '-' + str(outno)
#print(hashout, addrto, value)
addrval[addrto] = {
"from": addrfrom,
"fromall": addrfromall,
"hashin": hashin,
"txid": hashout,
"to": addrto,
"value": value
}
return addrval
if __name__ == "__main__":
def check_rawtx(data):
x = decoderawtx(data)
#print(json.dumps(x, sort_keys=True, indent=4, separators=(',', ': ')))
return x
#{
# "XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV": {
# "from": "XgijdekDojPYXPBvdJqKND4LV41tV7EgZ6",
# "hashin": "a2b77a4c704b7044cc15a0a7a78c9848e5edf7630106f211d471a3a4aa6887e7-0",
# "to": "XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV",
# "txid": "9f5fb5805f1c200e85ce92d5658c959329e3bac02ca5f21801ad9274f1bbc2a1-1",
# "value": "24.76163455"
# },
# "XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w": {
# "from": "XgijdekDojPYXPBvdJqKND4LV41tV7EgZ6",
# "hashin": "a2b77a4c704b7044cc15a0a7a78c9848e5edf7630106f211d471a3a4aa6887e7-0",
# "to": "XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w",
# "txid": "9f5fb5805f1c200e85ce92d5658c959329e3bac02ca5f21801ad9274f1bbc2a1-0",
# "value": "0.01000111"
# }
#}
check_rawtx('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff06039ab7010101ffffffff0240230e4300000000232103717f7082f58395f02afb45b1ae871cae31293b33c64c8d9568d9cac09fa70c51ac40230e43000000001976a9146753f211b0fb9ec2b5db90a0a4e08169c25629a388ac00000000')
check_rawtx('010000000128efb830f177a0fb4c4f0f3d7fee81e46a61c36ebd06b6d5ad5945f2f384f69d010000006b483045022100ea9275dad2aa4f17cd55409d87e1de80e86e14413f9419329dd06cb3f1fde35a0220535e251becb19eb3aec82ef28cdf8f60fe3eee8c9f08e0d7759d32a9e3fdf284012102d1c997d942867336302bd9e5c28f109cf851df0ceeee25563b4f36ae83a2bf2bffffffff020dc0530d000000001976a9147f561dc61197267f553385f53f8eb9623f0a472e88ac62d30928000000001976a914d36f11b42b491b9204d11c34f70f045271031e9988ac00000000')
check_rawtx('010000000148de3fa6f33c9242450f3258c92ea07f8c74d76d389903e362d15dd9893f1f4a010000006a473044022034a8b4107fb6b25ce724ab7daa4217c9ca2f5c1b156066847afae3bef7bcd463022010b2ae51a9a6b5b344defe3d918e13a906cb3e75364680f3400d3accd22dc1a70121037d75a42ea77cb436fdfe04ac52fa708ec9a2534c082596430fd7da5214d46edcffffffff01f03dcd1d000000001976a914606b2b996ea1255b73fec47318589bd6d94c373388ac00000000')
check_rawtx('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0603cb6a020101ffffffff0240230e43000000002321020c28102cb4f627e0760f167f85ce26fef587db4c60b9f6169cb8363ebab08e34ac40230e43000000001976a9143653b2eb98204e42b0a1ee143aabcad17ef3008a88ac00000000')
y = check_rawtx('0100000009841b4c2286d71e3999a853a4af2a8ef211f6ad689c0dcd94a4793ae8a91ef509940400006a4730440220357af1247e26fb49ed16e67171e152953311b74750301f17e8bfaf802a436cc1022006856054b8e81bdd8c6945184adeef3aae88afa440d77cf689eebc1d6274f87b0121034c5f36809f0c77b3d09f9006a7c4756a85bbca700f87615dca153e7df5b83d93ffffffff0df38e8b4e91c081aa0d048bf75554a832ce3fa42aee7b161e54b23acf3c1947010000006a473044022005d147a984d15465a5acbab31e843c2efdf49f6b2fc369718e51069f739128ee022072854e97a4c7b61337b1c8319321f5e893054358cd22de68593360e1c0326343012102bbfd9d1b28bf9b10ef0c8d4902da1fb3ddc6677508615e3f78a12b2983546014ffffffff23fece7d8f4554fe8897b218ca3f57381e026749ab95df16da357cf798890935cc0100006b483045022100f84f8a65aa53d4ae9f617cf3245d187d6384697c11d06f73d71d39c8ac722b2b022065a33292553b52a5d121f455607c76dfc6ce97fe8e05548b613bd5f65b66c64201210265a4b95950f7ad12d2ab73da6bd299d59c7a69ec47a3fee33e9fcd004392755effffffff23fece7d8f4554fe8897b218ca3f57381e026749ab95df16da357cf7988909350c0200006a4730440220563fce2d03ea7b55e2bfaffe45302f0c57849de4ea88b20c0089c3ec7603df4602207bf4a41ee3c841d19fcdfaef2392b9d525cca2118372750c0d8eba444fbf05c2012103d2fb8a0aa072cbaae66193635a5eb9d426e8757581caa065a21208e4d69af360ffffffff23fece7d8f4554fe8897b218ca3f57381e026749ab95df16da357cf798890935f00400006b483045022100e6936362d0c58eb7a50765def3f5cab68625e28b07b4355c2d1952611758ec970220509c1755bfd0436d230c71deb0dcbbc1b9c3dd2ce1e7be10371a87238ab1b10401210235f28f89f7cbc6dd3ce6bd75d0f53bff0ee50d4e5a37697cd45d40ea57354b48ffffffff4436a7b04aaa0f7e81b7569f8034b54d19341cb6780aa0cec141a6d53f7281dbdb0100006b483045022100e34577fcc17c5d3a78228c89e409c3c8c0c412fb0527354153a0d47b263b03250220524230d2adab348f80152b986858625913e2b5a4dfe6afc07e7612f78c4362de01210236b9dd2a08c2c8e984314e7cfa44c68c7cc190c9a9f6c8535b356d07bae85471ffffffff5bf3871b3327cabf4e5a889a6d96bbff1a2aac9a30fd5ce5756566382faf9903010000006b483045022100cca94c953b916c7c719e25c941d430779d7073dae36514f1acd7e6554a52217f02207f36fe98c9af33aa0328f69f71b6961f011a291323a9aef1ae9880333c60c4680121030076ad42e6b867a67c64d2162cabbfe3c1bc10b74b929dbba84e8a1078014275ffffffff818bae67cfa20257360bb4e46a021acf69c496d123079c627d6869bba0e8d33f010000006b483045022100b306eb03f55bba8cb352e8b792da975cd2210d9d87f2d897564847e7b6df46bc022036e48d08d88862abd3582763544260cf6ceb8dd9309a362f6b2c4914a5d338230121030076ad42e6b867a67c64d2162cabbfe3c1bc10b74b929dbba84e8a1078014275ffffffffe78768aaa4a371d411f2060163f7ede548988ca7a7a015cc44704b704c7ab7a2000000006b483045022100b603160b54cc50cffae77880b6f526cb19023c82d97c8c273025815d1b647e450220528165b33d34f62f69608794f0dcd774825c98c9cfb6550473686dd0c371380a0121030d23a8a9eef8d67a9c15b95c276b66676ade4e4d4a17ad5808b54cdaa02810e7ffffffff02af420f00000000001976a914cf65eba85ceb2f0e2c9c9fb0f635e8548b5a301e88ac7f419793000000001976a91496012a2395d9bec24462531c3681077eedc43d1988ac00000000')
assert y.get('XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV').get(
'from') == 'XgijdekDojPYXPBvdJqKND4LV41tV7EgZ6'
assert y.get('XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV').get(
'hashin') == 'a2b77a4c704b7044cc15a0a7a78c9848e5edf7630106f211d471a3a4aa6887e7-0'
assert y.get('XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV').get(
'to') == 'XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV'
assert y.get('XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV').get(
'txid') == '9f5fb5805f1c200e85ce92d5658c959329e3bac02ca5f21801ad9274f1bbc2a1-1'
assert y.get('XpMzawqNtHuewvnvgaMPDGoP761R7MpkFV').get(
'value') == '24.76163455'
assert y.get('XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w').get(
'from') == 'XgijdekDojPYXPBvdJqKND4LV41tV7EgZ6'
assert y.get('XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w').get(
'hashin') == 'a2b77a4c704b7044cc15a0a7a78c9848e5edf7630106f211d471a3a4aa6887e7-0'
assert y.get('XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w').get(
'to') == 'XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w'
assert y.get('XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w').get(
'txid') == '9f5fb5805f1c200e85ce92d5658c959329e3bac02ca5f21801ad9274f1bbc2a1-0'
assert y.get('XubToXRwAVu3y4LuAQ6cPqfNnTGnM9uJ9w').get(
'value') == '0.01000111'
x = check_rawtx('0100000001eac2334fe1c4b78f96b4331eca14bf66c639b303663164f04c3884ff9985b02500000000fc004730440220304a0866f288ae6455b45cbaf856492eb99228eed309293c044a897584cb0b8602206a8b5737d4deaa1eaecd3b3d94b87129136e58d48f60328982da3cb2b1663df801473044022040bf0a8ca4c63bf8719c85028190c7279480be0e8144b48b2d5d87ea7284965302203b4b1872ddfb6e000966cb0ff12ca9f2cc9c98f807430bb9a3f5b7b62d6a83b3014c695221022831cf0d73b49bcfeffbbf6febf3596cb15789f9179bc59e7c60677cb2ab71dd21027c7043d7b211067345209f6407b4ea2bbda217764ab93b1dabfd3e1cbc9a767f2103c2a27f02e473de3cf4f4e73056654be14f8f6dc241f6259a62c4418d53ec1cd953aeffffffff01a0860100000000001976a914c32287bbafb7673a2193632767ba612806c1a80788ac00000000')
z = check_rawtx('0100000001ce4c36e472159275c055ed0174711b5548732edeae0631f92f8479259a32cb2c010000006b483045022100d3451a60f6512d5896863bc47916d2814d1271ebf0cb9e38075fafcd1ee07dad022023e7a92677b0ed685c07918e8d6144e6828b8ce17ed0d617474dbb64d384532c0121032b27b0d0933c4a5ef49698ea9d58e109a9b9797194965420d2c4d64ab9369fe9ffffffff02400d03000000000017a914cfa6bd2270391368123e38d70e6c42062ecaeb538780dd1e01000000001976a914c32287bbafb7673a2193632767ba612806c1a80788ac00000000')
| 55.904167
| 2,837
| 0.735857
|
ded36e0ba3734df57a2c50dcdb89af160bf3d371
| 7,575
|
py
|
Python
|
tests/test_routing.py
|
oondeo/doodba-copier-template
|
172cbbfbbb8df405c23a1faf35b2b0b296dab60c
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
tests/test_routing.py
|
oondeo/doodba-copier-template
|
172cbbfbbb8df405c23a1faf35b2b0b296dab60c
|
[
"BSL-1.0",
"Apache-2.0"
] | 1
|
2021-05-05T03:47:28.000Z
|
2021-05-05T03:47:28.000Z
|
tests/test_routing.py
|
oondeo/doodba-copier-template
|
172cbbfbbb8df405c23a1faf35b2b0b296dab60c
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
import time
import uuid
from pathlib import Path
import pytest
import requests
from copier import copy
from packaging import version
from plumbum import local
from plumbum.cmd import docker_compose
@pytest.mark.parametrize("environment", ("test", "prod"))
def test_multiple_domains(
cloned_template: Path,
supported_odoo_version: float,
tmp_path: Path,
traefik_host: dict,
environment: str,
):
"""Test multiple domains are produced properly."""
base_domain = traefik_host["hostname"]
base_path = f"{base_domain}/web/login"
# XXX Remove traefik1 specific stuff some day
is_traefik1 = version.parse(traefik_host["traefik_version"]) < version.parse("2")
data = {
"odoo_listdb": True,
"odoo_version": supported_odoo_version,
"paths_without_crawlers": ["/web/login", "/web/database"],
"project_name": uuid.uuid4().hex,
f"domains_{environment}": [
# main0 has no TLS
{"hosts": [f"main0.{base_domain}"], "cert_resolver": False},
{
"hosts": [f"alt0.main0.{base_domain}", f"alt1.main0.{base_domain}"],
"cert_resolver": None,
"redirect_to": f"main0.{base_domain}",
},
# main1 has self-signed certificates
{"hosts": [f"main1.{base_domain}"], "cert_resolver": True},
{
"hosts": [f"alt0.main1.{base_domain}", f"alt1.main1.{base_domain}"],
"cert_resolver": True,
"redirect_to": f"main1.{base_domain}",
},
# main2 only serves certain routes
{
"hosts": [f"main2.{base_domain}"],
"path_prefixes": ["/insecure/"],
"entrypoints": ["web-insecure"],
"cert_resolver": False,
},
# main3 only serves certain routes in web-alt entrypoint
{
"hosts": [f"main3.{base_domain}"],
"path_prefixes": ["/alt/"],
"entrypoints": ["web-alt"],
"cert_resolver": False,
},
],
}
dc = docker_compose["-f", f"{environment}.yaml"]
with local.cwd(tmp_path):
copy(
src_path=str(cloned_template),
dst_path=".",
vcs_ref="test",
force=True,
data=data,
)
try:
dc("build")
dc(
"run",
"--rm",
"odoo",
"--stop-after-init",
"-i",
"base",
)
dc("up", "-d")
time.sleep(10)
# XXX Remove all Traefik 1 tests once it disappears
if is_traefik1:
# main0, globally redirected to TLS
response = requests.get(f"http://main0.{base_path}", verify=False)
assert response.ok
assert response.url == f"https://main0.{base_domain}:443/web/login"
assert response.headers["X-Robots-Tag"] == "noindex, nofollow"
# alt0 and alt1, globally redirected to TLS
for alt_num in range(2):
response = requests.get(
f"http://alt{alt_num}.main0.{base_path}", verify=False
)
assert response.ok
assert response.url == f"https://main0.{base_path}"
            # main2 only serves /insecure/ paths; Traefik 1's global HTTPS
            # redirection kicks in first, so the 404 comes from Traefik (not
            # from Odoo)
bad_response = requests.get(
f"http://main2.{base_domain}/insecure/path",
verify=False,
)
assert not bad_response.ok
assert bad_response.status_code == 404
assert "Server" not in bad_response.headers # 404 comes from Traefik
assert (
bad_response.url == f"https://main2.{base_domain}:443/insecure/path"
)
else:
# main0, no TLS
response = requests.get(f"http://main0.{base_path}")
assert response.ok
assert response.url == f"http://main0.{base_path}"
assert response.headers["X-Robots-Tag"] == "noindex, nofollow"
# alt0 and alt1, no TLS
for alt_num in range(2):
response = requests.get(f"http://alt{alt_num}.main0.{base_path}")
assert response.ok
assert response.url == f"http://main0.{base_path}"
            # main2 serves /insecure/ paths over plain HTTP on port 80; there
            # is no HTTPS redirection and the 404 comes from Odoo (not from
            # Traefik)
bad_response = requests.get(
f"http://main2.{base_domain}/insecure/path",
verify=False,
)
assert not bad_response.ok
assert bad_response.status_code == 404
assert "Werkzeug" in bad_response.headers.get("Server")
assert bad_response.url == f"http://main2.{base_domain}/insecure/path"
# main3 cannot find /web on port 8080; no HTTPS redirection
bad_response = requests.get(
f"http://main3.{base_domain}:8080/web",
)
assert not bad_response.ok
assert bad_response.status_code == 404
assert "Server" not in bad_response.headers # 404 comes from Traefik
assert bad_response.url == f"http://main3.{base_domain}:8080/web"
# main3 will route to odoo in /alt/foo but fail with 404 from there, no HTTPS
bad_response = requests.get(
f"http://main3.{base_domain}:8080/alt/foo",
)
assert not bad_response.ok
assert bad_response.status_code == 404
assert "Werkzeug" in bad_response.headers.get("Server")
assert bad_response.url == f"http://main3.{base_domain}:8080/alt/foo"
# main1, with self-signed TLS
response = requests.get(f"http://main1.{base_path}", verify=False)
assert response.ok
assert response.url == (
f"https://main1.{base_domain}:443/web/login"
if is_traefik1
else f"https://main1.{base_path}"
)
assert response.headers["X-Robots-Tag"] == "noindex, nofollow"
# alt0 and alt1, with self-signed TLS
for alt_num in range(2):
response = requests.get(
f"http://alt{alt_num}.main1.{base_domain}/web/database/selector",
verify=False,
)
assert response.ok
assert (
response.url == f"https://main1.{base_domain}/web/database/selector"
)
assert response.headers["X-Robots-Tag"] == "noindex, nofollow"
# missing, which fails with Traefik 404, both with and without TLS
bad_response = requests.get(
f"http://missing.{base_path}", verify=not is_traefik1
)
assert bad_response.status_code == 404
assert "Server" not in bad_response.headers
bad_response = requests.get(f"https://missing.{base_path}", verify=False)
assert bad_response.status_code == 404
assert "Server" not in bad_response.headers
finally:
dc("down", "--volumes", "--remove-orphans")
| 42.79661
| 89
| 0.528317
|
8d8c7b1946bfd64b7d7ee91b613e1ea1f66ac0ce
| 832
|
py
|
Python
|
smbl/prog/plugins/bcftools.py
|
karel-brinda/snakemake-lib
|
5922fa2fc4060d86172e991361a1cceb0af51af8
|
[
"MIT"
] | 26
|
2015-03-16T03:37:02.000Z
|
2021-01-18T17:34:16.000Z
|
smbl/prog/plugins/bcftools.py
|
karel-brinda/smbl
|
5922fa2fc4060d86172e991361a1cceb0af51af8
|
[
"MIT"
] | 12
|
2015-02-05T10:57:16.000Z
|
2016-06-07T18:09:57.000Z
|
smbl/prog/plugins/bcftools.py
|
karel-brinda/snakemake-lib
|
5922fa2fc4060d86172e991361a1cceb0af51af8
|
[
"MIT"
] | 6
|
2015-06-03T20:06:49.000Z
|
2020-12-13T09:48:03.000Z
|
#
# TODO:
# - fix linking error in CygWin
#
import smbl
import snakemake
import os
from ._program import *
BCFTOOLS = get_bin_file_path("bcftools")
VCFUTILS = get_bin_file_path("vcfutils.pl")
##########################################
##########################################
class BcfTools(Program):
@classmethod
def get_installation_files(cls):
return [
BCFTOOLS,
VCFUTILS,
]
@classmethod
def install(cls):
gitdir_bcftools=cls.git_clone("http://github.com/samtools/bcftools","bcftools")
gitdir_htslib=cls.git_clone("http://github.com/samtools/htslib","htslib")
cls.run_make("bcftools")
cls.install_file("bcftools/bcftools",BCFTOOLS)
cls.install_file("bcftools/vcfutils.pl",VCFUTILS)
@classmethod
def supported_platforms(cls):
return ["osx","linux"]
| 21.333333
| 82
| 0.627404
|
7f5342c76cb7767039db7c82d5a059cadc828aa6
| 3,273
|
py
|
Python
|
contracts/type_converter/type_converter.py
|
ICONationDevTeam/ICONSafe-SCORE
|
a02640f3bbbf71c633773a87b99d354e2466c26d
|
[
"Apache-2.0"
] | null | null | null |
contracts/type_converter/type_converter.py
|
ICONationDevTeam/ICONSafe-SCORE
|
a02640f3bbbf71c633773a87b99d354e2466c26d
|
[
"Apache-2.0"
] | null | null | null |
contracts/type_converter/type_converter.py
|
ICONationDevTeam/ICONSafe-SCORE
|
a02640f3bbbf71c633773a87b99d354e2466c26d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2021 ICONation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from iconservice import *
class ScoreTypeConverter:
@staticmethod
def convert(param_type: str, value: str):
if not isinstance(value, str):
raise IconScoreException(f"Value type must be str ({value} is {type(value)}).")
valid_types = {
"int": ScoreTypeConverter._convert_value_int,
"str": ScoreTypeConverter._convert_value_string,
"bool": ScoreTypeConverter._convert_value_bool,
"Address": ScoreTypeConverter._convert_value_address,
"bytes": ScoreTypeConverter._convert_value_bytes,
"List": ScoreTypeConverter._convert_value_list,
"TypedDict": ScoreTypeConverter._convert_value_typed_dict,
}
        if param_type not in valid_types:
            raise IconScoreException(f"{param_type} is not a supported type (only {valid_types.keys()} are supported)")
try:
return valid_types[param_type](value)
except Exception as e:
raise IconScoreException(f"Cannot convert {value} from type {param_type} : {repr(e)}")
@staticmethod
def _convert_value_int(value: str) -> int:
return int(value, 0)
@staticmethod
def _convert_value_string(value: str) -> str:
return value
@staticmethod
def _convert_value_bool(value: str) -> bool:
if value == "True" or value == "0x1" or value == "1":
return True
if value == "False" or value == "0x0" or value == "0":
return False
raise IconScoreException("Invalid bool value")
@staticmethod
def _convert_value_address(value: str) -> Address:
return Address.from_string(value)
@staticmethod
def _convert_value_bytes(value: str) -> bytes:
if value.startswith("0x"):
return bytes.fromhex(value[2:])
else:
return bytes.fromhex(value)
@staticmethod
def _convert_value_list(value: str) -> list:
items = json_loads(value)
if type(items) != list:
raise IconScoreException(f"Invalid variable type, excepted list, found {type(items)}")
result = []
for item in items:
result.append(ScoreTypeConverter.convert(item["type"], item["value"]))
return result
@staticmethod
def _convert_value_typed_dict(value: str) -> dict:
items = json_loads(value)
if type(items) != dict:
raise IconScoreException(f"Invalid variable type, excepted dict, found {type(items)}")
result = {}
for key, item in items.items():
result[key] = ScoreTypeConverter.convert(item["type"], item["value"])
return result
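# Minimal usage sketch (illustrative, not part of the original file):
#   ScoreTypeConverter.convert("int", "0x10")   -> 16    (int(value, 0) accepts hex)
#   ScoreTypeConverter.convert("bool", "0x1")   -> True
#   ScoreTypeConverter.convert(
#       "List", '[{"type": "int", "value": "2"}, {"type": "str", "value": "a"}]'
#   )                                           -> [2, "a"]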
| 34.452632
| 117
| 0.650474
|
ca7f3ff54d9135507a34926c790bb4bed0d5f195
| 102
|
py
|
Python
|
optunapi/utils/__init__.py
|
mbarbetti/optunapi
|
48ec23f0b25c9459910e0cc01daa057acd8643e1
|
[
"MIT"
] | null | null | null |
optunapi/utils/__init__.py
|
mbarbetti/optunapi
|
48ec23f0b25c9459910e0cc01daa057acd8643e1
|
[
"MIT"
] | 2
|
2021-05-24T17:18:02.000Z
|
2021-05-25T16:00:29.000Z
|
optunapi/utils/__init__.py
|
mbarbetti/optunapi
|
48ec23f0b25c9459910e0cc01daa057acd8643e1
|
[
"MIT"
] | null | null | null |
from .suggest_from_config import suggest_from_config
from .create_log_file import create_log_file
| 34
| 52
| 0.862745
|
d45e733688b546436be1e557d0bc0302405acc08
| 284
|
py
|
Python
|
src/app/admin.py
|
aminul91/linktutor_restapi
|
2077119f9771b31e2aa49f914ebd76451fe3b53c
|
[
"MIT"
] | null | null | null |
src/app/admin.py
|
aminul91/linktutor_restapi
|
2077119f9771b31e2aa49f914ebd76451fe3b53c
|
[
"MIT"
] | null | null | null |
src/app/admin.py
|
aminul91/linktutor_restapi
|
2077119f9771b31e2aa49f914ebd76451fe3b53c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from app.models import *
# Register your models here.
from .models import *
admin.site.register(user_infos)
admin.site.register(tutorial_types)
admin.site.register(language_types)
admin.site.register(tutorials_paths)
admin.site.register(suggestions)
| 25.818182
| 36
| 0.823944
|
39f42adf0ba618338c976383685cd2290ad7bbb7
| 4,369
|
py
|
Python
|
doc/source/examples/plot_mbe_maxcut.py
|
orodrigoaraizabravo/quantum
|
aa1edea0ed8bf15c0f0a6547d765eda8e5a3afd7
|
[
"BSD-3-Clause"
] | 70
|
2021-07-20T22:42:46.000Z
|
2022-03-25T10:17:39.000Z
|
doc/source/examples/plot_mbe_maxcut.py
|
orodrigoaraizabravo/quantum
|
aa1edea0ed8bf15c0f0a6547d765eda8e5a3afd7
|
[
"BSD-3-Clause"
] | 2
|
2021-11-07T16:29:18.000Z
|
2021-11-15T11:55:23.000Z
|
doc/source/examples/plot_mbe_maxcut.py
|
orodrigoaraizabravo/quantum
|
aa1edea0ed8bf15c0f0a6547d765eda8e5a3afd7
|
[
"BSD-3-Clause"
] | 10
|
2021-11-07T14:13:50.000Z
|
2022-03-25T10:17:40.000Z
|
"""
Multi-Basis Encoding
--------------------
Multi-Basis Encoding ([1]_) (MBE) quantum optimization algorithm for MaxCut using TensorLy-Quantum.
TensorLy-Quantum provides a Python interface
to build TT-tensor network circuit simulator
for large-scale simulation of variational quantum circuits
with full Autograd support similar to traditional PyTorch Neural Networks.
"""
import tensorly as tl
import tlquantum as tlq
from torch import randint, rand, arange, cat, tanh, no_grad, float32
from torch.optim import Adam
import matplotlib.pyplot as plt
# %% Set up simulation parameters
# Uncomment the line below to use the GPU
#device = 'cuda'
device = 'cpu'
dtype = float32
nepochs = 40 #number of training epochs
nqubits = 20 #number of qubits
ncontraq = 2  # number of qubits to pre-contract into a single core
ncontral = 2  # number of layers to pre-contract into a single core
nterms = 20
lr = 0.7
# %% Generate an input state. For each qubit, 0 --> |0> and 1 --> |1>
state = tlq.spins_to_tt_state([0 for i in range(nqubits)], device=device, dtype=dtype) # generate generic zero state |00000>
state = tlq.qubits_contract(state, ncontraq)
# %% Generate the graph vertices/edges. Each pair of qubits represents two vertices with an edge between them.
# Here we build a random graph with randomly weighted edges.
# Note: MBE allows us to encode two vertices (typically two qubits) into a single qubit using the z and x-axes.
# If y-axis included, we can encode three vertices per qubit.
vertices1 = randint(2*nqubits, (nterms,), device=device) # randomly generated first qubits (vertices) of each two-qubit term (edge)
vertices2 = randint(2*nqubits, (nterms,), device=device) # randomly generated second qubits (vertices) of each two-qubit term (edge)
vertices2[vertices2==vertices1] += 1 # because qubits in this graph are randomly generated, eliminate self-interacting terms
vertices2[vertices2 >= nqubits] = 0
weights = rand((nterms,), device=device) # randomly generated edge weights
# %% Build unitary gates in TT tensor form
RotY1 = tlq.UnaryGatesUnitary(nqubits, ncontraq, device=device, dtype=dtype) #single-qubit rotations about the Y-axis
RotY2 = tlq.UnaryGatesUnitary(nqubits, ncontraq, device=device, dtype=dtype)
CZ0 = tlq.BinaryGatesUnitary(nqubits, ncontraq, tlq.cz(device=device, dtype=dtype), 0) # one controlled-z gate for each pair of qubits using even parity (even qubits control)
unitaries = [RotY1, CZ0, RotY2]
# %% Multi-Basis Encoding (MBE) Simulation for MaxCut optimization
circuit = tlq.TTCircuit(unitaries, ncontraq, ncontral) # build TTCircuit using specified unitaries
opz, opx = tl.tensor([[1,0],[0,-1]], device=device, dtype=dtype), tl.tensor([[0,1],[1,0]], device=device, dtype=dtype) # measurement operators for MBE
print(opz)
opt = Adam(circuit.parameters(), lr=lr, amsgrad=True) # define PyTorch optimizer
loss_vec = tl.zeros(nepochs)
cut_vec = tl.zeros(nepochs)
for epoch in range(nepochs):
# TTCircuit forward pass computes expectation value of single-qubit pauli-z and pauli-x measurements
spinsz, spinsx = circuit.forward_single_qubit(state, opz, opx)
spins = cat((spinsz, spinsx))
nl_spins = tanh(spins) # apply non-linear activation function to measurement results
loss = tlq.calculate_cut(nl_spins, vertices1, vertices2, weights) # calculate the loss function using MBE
print('Relaxation (raw) loss at epoch ' + str(epoch) + ': ' + str(loss.item()) + '. \n')
with no_grad():
cut_vec[epoch] = tlq.calculate_cut(tl.sign(spins), vertices1, vertices2, weights, get_cut=True) #calculate the rounded MaxCut estimate (algorithm's result)
print('Rounded MaxCut value (algorithm\'s solution): ' + str(cut_vec[epoch]) + '. \n')
# PyTorch Autograd attends to backwards pass and parameter update
loss.backward()
opt.step()
opt.zero_grad()
loss_vec[epoch] = loss
# %% Visualize the result
plt.rc('xtick')
plt.rc('ytick')
fig, ax1 = plt.subplots()
ax1.plot(loss_vec.detach().numpy(), color='k')
ax2 = ax1.twinx()
ax2.plot(cut_vec.detach().numpy(), color='g')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss', color='k')
ax2.set_ylabel('Cut', color='g')
plt.show()
# %%
# References
# ----------
# .. [1] T. L. Patti, J. Kossaifi, A. Anandkumar, and S. F. Yelin, "Variational Quantum Optimization with Multi-Basis Encodings," (2021), arXiv:2106.13304.
| 42.417476
| 174
| 0.731289
|
88028214874bd5b75637115813f1474468015e88
| 4,484
|
py
|
Python
|
src/check_binkp_node/check_binkp_node.py
|
buanzo/check_binkp_node
|
507bf760dc8daaff69a31ee16e451739a0cafc0a
|
[
"MIT"
] | 1
|
2021-09-23T14:51:55.000Z
|
2021-09-23T14:51:55.000Z
|
src/check_binkp_node/check_binkp_node.py
|
buanzo/check_binkp_node
|
507bf760dc8daaff69a31ee16e451739a0cafc0a
|
[
"MIT"
] | null | null | null |
src/check_binkp_node/check_binkp_node.py
|
buanzo/check_binkp_node
|
507bf760dc8daaff69a31ee16e451739a0cafc0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import nagiosplugin
from datetime import datetime, timezone
from pprint import pprint
from construct import Int8ub, Struct, this, Bytes, GreedyRange
import time
import socket
import dateparser
__version__ = '0.1'
CONN_TIMEOUT = 10
READ_TIMEOUT = 3
# The v1.0 BINKP protocol spec:
# https://github.com/pgul/binkd/blob/master/doc/binkp10-en.txt
# Of course, I am not implementing an actual BINKP client...
binkp10format = Struct(
"type"/Int8ub,
"length"/Int8ub,
"cmdargs"/Int8ub,
"string"/Bytes(this.length-1),
)
def binkp_node_parse(host, port, connect_timeout=10, read_timeout=3):
start_time = datetime.now().replace(microsecond=0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
s.connect((host, port))
s.settimeout(read_timeout)
time.sleep(1)
realdata = bytearray()
while True:
data = ''
try:
data = s.recv(128)
except socket.timeout:
break
if not data:
time.sleep(0.1)
continue
elif data == '':
break
realdata.extend(data)
s.close()
stop_time = datetime.now().replace(microsecond=0)
req_duration = stop_time - start_time
parser = GreedyRange(binkp10format)
x = parser.parse(realdata)
for item in x:
# TODO: actually check using type most significant bit, etc.
d_item = item.string.decode('ascii')
if d_item[0:5] == 'TIME ':
node_time = d_item.split('TIME ')[1]
#print("GOT TIME: {}".format(node_time))
binkpdate = dateparser.parse(node_time)
print("BINKP DATE: {}".format(binkpdate))
# TODO:
today = datetime.now(binkpdate.tzinfo).replace(microsecond=0)
#pprint(today)
print("LOCAL DATE: {}".format(today))
delta = today - binkpdate
print("DURATION: {}".format(req_duration.seconds))
pprint(delta.seconds)
return(delta.seconds-req_duration.seconds)
# FIX: No TIME ?
return(None)
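# Hedged usage sketch (illustrative only; the host below is a placeholder, not a
# real FidoNet node). binkp_node_parse() returns the node's clock drift in seconds,
# corrected for how long the probe itself took, or None if no TIME frame was seen.
def _example_drift_probe():
    return binkp_node_parse('binkp.example.org', 24554,
                            connect_timeout=CONN_TIMEOUT,
                            read_timeout=READ_TIMEOUT)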
class BinkpNodeCheck(nagiosplugin.Resource):
def __init__(self, host, port, conn_timeout, read_timeout):
self.host = host
self.port = port
self.conn_timeout = conn_timeout
self.read_timeout = read_timeout
def probe(self):
time_diff = binkp_node_parse(self.host, port=self.port, connect_timeout=self.conn_timeout, read_timeout=self.read_timeout)
print(time_diff)
if time_diff is None:
            return [nagiosplugin.Metric('binkpnodedrift', -1, context='binkpnodedrift')]
# FIX: use nagiosplugin.state.Unknown in LoadSummary?
        return [nagiosplugin.Metric('binkpnodedrift',
                                    time_diff,
                                    context='binkpnodedrift')]
class LoadSummary(nagiosplugin.Summary):
def __init__(self, domain, port):
self.domain = domain
self.port = port
pass
@nagiosplugin.guarded
def main():
argp = argparse.ArgumentParser(description=__doc__)
# FIX: find good values for warning/critical
argp.add_argument('-w', '--warning', metavar='RANGE', default='6:10',
help='warning SECONDS drift. Default=6:10')
argp.add_argument('-c', '--critical', metavar='RANGE', default='0:5',
help='critical SECONDS drift. Default=0:5')
argp.add_argument('-v', '--verbose', action='count', default=0,
help='be more verbose')
argp.add_argument('-p', '--port', metavar='PORT', default=24554, type=int,
help='Remote PORT for binkp service. Default is 24554.')
argp.add_argument('domain')
args = argp.parse_args()
wrange = '@{}'.format(args.warning)
crange = '@{}'.format(args.critical)
    fmetric = '{value} seconds of clock drift'
# FIX: add 'isvaliddomainname' test
check = nagiosplugin.Check(BinkpNodeCheck(args.domain, args.port, CONN_TIMEOUT, READ_TIMEOUT),
                               nagiosplugin.ScalarContext('binkpnodedrift',
warning=wrange,
critical=crange,
fmt_metric=fmetric),
LoadSummary(args.domain, args.port))
check.main(verbose=args.verbose)
if __name__ == '__main__':
main()
| 35.872
| 130
| 0.600803
|
c6fd0abe25dae2645c2e288141ba61bd26e90787
| 2,747
|
py
|
Python
|
gcf_demo_excel_report.py
|
hadi-muhammad/functions-demo
|
39185a58d8182f3dc0c00a05e40cc131fc8f54a8
|
[
"MIT"
] | null | null | null |
gcf_demo_excel_report.py
|
hadi-muhammad/functions-demo
|
39185a58d8182f3dc0c00a05e40cc131fc8f54a8
|
[
"MIT"
] | null | null | null |
gcf_demo_excel_report.py
|
hadi-muhammad/functions-demo
|
39185a58d8182f3dc0c00a05e40cc131fc8f54a8
|
[
"MIT"
] | null | null | null |
from pyplatform.datawarehouse import *
from google.cloud import bigquery
import io
from flask import send_file
import datetime
import pytz
def main(request):
_doc_string = """Return excel file in response to POST request. Return function doc_string for GET request.
Keyword Arguments for request body:
        query {str} -- standard sql `select statement` OR
`stored procedure call` containing select statements
filename {str} -- custom filename for the excel file (default: report_timestamp.xlsx)
        sheet_name {str or list} -- custom sheet name for the excel sheet. For a multi-statement SELECT query or stored procedure, a list of sheet names should be provided (default: Sheet1, Sheet2...)
        index {bool or list} -- if true, writes the dataframe index. For multi-sheet output, a list of bools should be provided. By default index is ignored (default: {False})
Returns:
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
"""
client = bigquery.Client()
requestDateTime = datetime.datetime.now(pytz.timezone(
'America/Chicago')).strftime('%Y-%m-%d %A %I:%M:%S %p CDT')
headers = {"client_id": client.get_service_account_email(),
"project_id": client.project}
request_json = request.get_json()
if request_json:
        print(f'POST request type of request_json {type(request_json)}, {request_json}')
# parse function arguments from request body
query = request_json.get('query')
filename = request_json.get('filename')
sheet_name = request_json.get('sheet_name')
index = request_json.get('index')
# setting default values
job_id = create_bq_job_id("Azure logic App Excel file request")
if filename == None:
filename = f'report_{job_id[:19]}.xlsx'
else:
filename = filename.split('.')[0] + '.xlsx'
if not index:
index = False
print(f" PUT request provided => query {query}, filename {filename}, sheet_name:{sheet_name}, index: {index}")
df = bq_to_df(query)
in_mem_file = io.BytesIO()
        dfs = [df]
dfs_to_excel(dfs, in_mem_file, sheet_name=sheet_name, index=index)
in_mem_file.seek(0)
print("sending requested file")
response = send_file(in_mem_file, mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
as_attachment=True, attachment_filename=filename)
headers['requestTimestamp'] = requestDateTime
headers["filename"] = filename
headers['job_id'] = job_id
return (response, 200, headers)
else:
return (_doc_string,200,headers)
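# Hedged example of the request body this function expects (illustrative only;
# the project/dataset/table names are placeholders, not real BigQuery objects).
_EXAMPLE_REQUEST_BODY = {
    "query": "SELECT * FROM `my_project.my_dataset.my_table` LIMIT 100",
    "filename": "sales_report.xlsx",
    "sheet_name": "Sheet1",
    "index": False,
}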
| 36.626667
| 193
| 0.659265
|
c56879d51a27bd0e41030c0efe058f73b056392e
| 11,145
|
py
|
Python
|
theano/misc/pkl_utils.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | 1
|
2020-12-30T19:12:52.000Z
|
2020-12-30T19:12:52.000Z
|
theano/misc/pkl_utils.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | null | null | null |
theano/misc/pkl_utils.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Utility classes and methods to pickle parts of symbolic graph.
These pickled graphs can be used, for instance, as cases for
unit tests or regression tests.
"""
import os
import pickle
import sys
import tempfile
import warnings
import zipfile
from collections import defaultdict
from contextlib import closing
from io import BytesIO
from pickle import HIGHEST_PROTOCOL
import numpy as np
import theano
try:
from pickle import DEFAULT_PROTOCOL
except ImportError:
DEFAULT_PROTOCOL = HIGHEST_PROTOCOL
from theano.compile.sharedvalue import SharedVariable
from theano.configdefaults import config
__docformat__ = "restructuredtext en"
__authors__ = "Pascal Lamblin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__license__ = "3-clause BSD"
min_recursion = 3000
if sys.getrecursionlimit() < min_recursion:
sys.setrecursionlimit(min_recursion)
Pickler = pickle.Pickler
class StripPickler(Pickler):
"""
Subclass of Pickler that strips unnecessary attributes from Theano objects.
.. versionadded:: 0.8
Example of use::
fn_args = dict(inputs=inputs,
outputs=outputs,
updates=updates)
dest_pkl = 'my_test.pkl'
f = open(dest_pkl, 'wb')
strip_pickler = StripPickler(f, protocol=-1)
strip_pickler.dump(fn_args)
f.close()
"""
def __init__(self, file, protocol=0, extra_tag_to_remove=None):
# Can't use super as Pickler isn't a new style class
Pickler.__init__(self, file, protocol)
self.tag_to_remove = ["trace", "test_value"]
if extra_tag_to_remove:
self.tag_to_remove.extend(extra_tag_to_remove)
def save(self, obj):
# Remove the tag.trace attribute from Variable and Apply nodes
if isinstance(obj, theano.gof.utils.Scratchpad):
for tag in self.tag_to_remove:
if hasattr(obj, tag):
del obj.__dict__[tag]
# Remove manually-added docstring of Elemwise ops
elif isinstance(obj, theano.tensor.Elemwise):
if "__doc__" in obj.__dict__:
del obj.__dict__["__doc__"]
return Pickler.save(self, obj)
class PersistentNdarrayID:
"""Persist ndarrays in an object by saving them to a zip file.
:param zip_file: A zip file handle that the NumPy arrays will be saved to.
:type zip_file: :class:`zipfile.ZipFile`
.. note:
The convention for persistent ids given by this class and its derived
classes is that the name should take the form `type.name` where `type`
can be used by the persistent loader to determine how to load the
object, while `name` is human-readable and as descriptive as possible.
"""
def __init__(self, zip_file):
self.zip_file = zip_file
self.count = 0
self.seen = {}
def _resolve_name(self, obj):
"""Determine the name the object should be saved under."""
name = f"array_{self.count}"
self.count += 1
return name
def __call__(self, obj):
if isinstance(obj, np.ndarray):
if id(obj) not in self.seen:
def write_array(f):
np.lib.format.write_array(f, obj)
name = self._resolve_name(obj)
zipadd(write_array, self.zip_file, name)
self.seen[id(obj)] = f"ndarray.{name}"
return self.seen[id(obj)]
class PersistentGpuArrayID(PersistentNdarrayID):
def __call__(self, obj):
from theano.gpuarray.type import _name_for_ctx
try:
import pygpu
except ImportError:
pygpu = None
if pygpu and isinstance(obj, pygpu.gpuarray.GpuArray):
if id(obj) not in self.seen:
def write_array(f):
pickle.dump(_name_for_ctx(obj.context), f, 2)
np.lib.format.write_array(f, np.asarray(obj))
name = self._resolve_name(obj)
zipadd(write_array, self.zip_file, name)
self.seen[id(obj)] = f"gpuarray.{name}"
return self.seen[id(obj)]
return super().__call__(obj)
class PersistentSharedVariableID(PersistentGpuArrayID):
"""Uses shared variable names when persisting to zip file.
If a shared variable has a name, this name is used as the name of the
NPY file inside of the zip file. NumPy arrays that aren't matched to a
shared variable are persisted as usual (i.e. `array_0`, `array_1`,
etc.)
:param allow_unnamed: Allow shared variables without a name to be
persisted. Defaults to ``True``.
:type allow_unnamed: bool, optional
:param allow_duplicates: Allow multiple shared variables to have the same
name, in which case they will be numbered e.g. `x`, `x_2`, `x_3`, etc.
Defaults to ``True``.
:type allow_duplicates: bool, optional
:raises ValueError
If an unnamed shared variable is encountered and `allow_unnamed` is
``False``, or if two shared variables have the same name, and
`allow_duplicates` is ``False``.
"""
def __init__(self, zip_file, allow_unnamed=True, allow_duplicates=True):
super().__init__(zip_file)
self.name_counter = defaultdict(int)
self.ndarray_names = {}
self.allow_unnamed = allow_unnamed
self.allow_duplicates = allow_duplicates
def _resolve_name(self, obj):
if id(obj) in self.ndarray_names:
name = self.ndarray_names[id(obj)]
count = self.name_counter[name]
self.name_counter[name] += 1
if count:
if not self.allow_duplicates:
raise ValueError(
f"multiple shared variables with the name `{name}` found"
)
name = f"{name}_{count + 1}"
return name
return super()._resolve_name(obj)
def __call__(self, obj):
if isinstance(obj, SharedVariable):
if obj.name:
if obj.name == "pkl":
ValueError("can't pickle shared variable with name `pkl`")
self.ndarray_names[id(obj.container.storage[0])] = obj.name
elif not self.allow_unnamed:
raise ValueError(f"unnamed shared variable, {obj}")
return super().__call__(obj)
class PersistentNdarrayLoad:
"""Load NumPy arrays that were persisted to a zip file when pickling.
:param zip_file: The zip file handle in which the NumPy arrays are saved.
:type zip_file: :class:`zipfile.ZipFile`
"""
def __init__(self, zip_file):
self.zip_file = zip_file
self.cache = {}
def __call__(self, persid):
from theano.gpuarray import pygpu
from theano.gpuarray.type import get_context
array_type, name = persid.split(".")
if name in self.cache:
return self.cache[name]
ret = None
if array_type == "gpuarray":
with self.zip_file.open(name) as f:
ctx_name = pickle.load(f)
array = np.lib.format.read_array(f)
if config.experimental__unpickle_gpu_on_cpu:
# directly return numpy array
warnings.warn(
"config.experimental__unpickle_gpu_on_cpu is set "
"to True. Unpickling GpuArray as numpy.ndarray"
)
ret = array
elif pygpu:
ret = pygpu.array(array, context=get_context(ctx_name))
else:
raise ImportError("pygpu not found. Cannot unpickle GpuArray")
else:
with self.zip_file.open(name) as f:
ret = np.lib.format.read_array(f)
self.cache[name] = ret
return ret
def dump(
obj,
file_handler,
protocol=DEFAULT_PROTOCOL,
persistent_id=PersistentSharedVariableID,
):
"""Pickles an object to a zip file using external persistence.
:param obj: The object to pickle.
:type obj: object
:param file_handler: The file handle to save the object to.
:type file_handler: file
:param protocol: The pickling protocol to use. Unlike Python's built-in
pickle, the default is set to `2` instead of 0 for Python 2. The
Python 3 default (level 3) is maintained.
:type protocol: int, optional
:param persistent_id: The callable that persists certain objects in the
object hierarchy to separate files inside of the zip file. For example,
:class:`PersistentNdarrayID` saves any :class:`numpy.ndarray` to a
separate NPY file inside of the zip file.
:type persistent_id: callable
.. versionadded:: 0.8
.. note::
The final file is simply a zipped file containing at least one file,
`pkl`, which contains the pickled object. It can contain any other
number of external objects. Note that the zip files are compatible with
NumPy's :func:`numpy.load` function.
>>> import theano
>>> foo_1 = theano.shared(0, name='foo')
>>> foo_2 = theano.shared(1, name='foo')
>>> with open('model.zip', 'wb') as f:
... dump((foo_1, foo_2, np.array(2)), f)
>>> np.load('model.zip').keys()
['foo', 'foo_2', 'array_0', 'pkl']
>>> np.load('model.zip')['foo']
array(0)
>>> with open('model.zip', 'rb') as f:
... foo_1, foo_2, array = load(f)
>>> array
array(2)
"""
with closing(
zipfile.ZipFile(file_handler, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
) as zip_file:
def func(f):
p = pickle.Pickler(f, protocol=protocol)
p.persistent_id = persistent_id(zip_file)
p.dump(obj)
zipadd(func, zip_file, "pkl")
def load(f, persistent_load=PersistentNdarrayLoad):
"""Load a file that was dumped to a zip file.
:param f: The file handle to the zip file to load the object from.
:type f: file
:param persistent_load: The persistent loading function to use for
        unpickling. This must be compatible with the `persistent_id` function
used when pickling.
:type persistent_load: callable, optional
.. versionadded:: 0.8
"""
with closing(zipfile.ZipFile(f, "r")) as zip_file:
p = pickle.Unpickler(BytesIO(zip_file.open("pkl").read()))
p.persistent_load = persistent_load(zip_file)
return p.load()
def zipadd(func, zip_file, name):
"""Calls a function with a file object, saving it to a zip file.
:param func: The function to call.
:type func: callable
:param zip_file: The zip file that `func` should write its data to.
:type zip_file: :class:`zipfile.ZipFile`
:param name: The name of the file inside of the zipped archive that `func`
should save its data to.
:type name: str
"""
with tempfile.NamedTemporaryFile("wb", delete=False) as temp_file:
func(temp_file)
temp_file.close()
zip_file.write(temp_file.name, arcname=name)
if os.path.isfile(temp_file.name):
os.remove(temp_file.name)
| 32.304348
| 81
| 0.628264
|
6a85dbfd365b831c268163a891f6fb284edaf426
| 9,937
|
py
|
Python
|
fairseq/tasks/fairseq_task.py
|
jiahaosuda/fairseq
|
94fedf0026ee0bdc652bc7c5706568ae27b78375
|
[
"BSD-3-Clause"
] | 1
|
2019-03-30T11:24:15.000Z
|
2019-03-30T11:24:15.000Z
|
fairseq/tasks/fairseq_task.py
|
jiahaosuda/fairseq
|
94fedf0026ee0bdc652bc7c5706568ae27b78375
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/tasks/fairseq_task.py
|
jiahaosuda/fairseq
|
94fedf0026ee0bdc652bc7c5706568ae27b78375
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import tokenizer
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
from fairseq.tokenizer import Tokenizer
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
def __init__(self, args):
self.args = args
self.datasets = {}
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Tokenizer.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError('Dataset not loaded: ' + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError('Datasets are expected to be of type FairseqDataset')
return self.datasets[split]
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
assert isinstance(dataset, FairseqDataset)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
indices = data_utils.filter_by_size(
indices, dataset.size, max_positions, raise_exception=(not ignore_invalid_inputs),
)
# create mini-batches with given size constraints
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
)
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models
return models.build_model(args, self)
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(self, args):
if args.score_reference:
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(self.target_dictionary)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
self.target_dictionary,
beam_size=args.beam,
max_len_a=args.max_len_a,
max_len_b=args.max_len_b,
min_len=args.min_len,
stop_early=(not args.no_early_stop),
normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen,
unk_penalty=args.unkpen,
sampling=args.sampling,
sampling_topk=args.sampling_topk,
sampling_temperature=args.sampling_temperature,
diverse_beam_groups=args.diverse_beam_groups,
diverse_beam_strength=args.diverse_beam_strength,
match_source_len=args.match_source_len,
no_repeat_ngram_size=args.no_repeat_ngram_size,
)
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
def grad_denom(self, sample_sizes, criterion):
return criterion.__class__.grad_denom(sample_sizes)
def aggregate_logging_outputs(self, logging_outputs, criterion):
        return criterion.__class__.aggregate_logging_outputs(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
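# Hedged sketch of a concrete task (illustrative only; `MonolingualDictionaryTask`
# and `args.dict_path` are made-up names, not part of fairseq, and real tasks are
# normally registered through fairseq's task registry before use).
class MonolingualDictionaryTask(FairseqTask):
    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
    @classmethod
    def setup_task(cls, args, **kwargs):
        # load the single dictionary this toy task needs before building the model
        dictionary = cls.load_dictionary(args.dict_path)
        return cls(args, dictionary)
    def load_dataset(self, split, combine=False, **kwargs):
        # a real task would populate self.datasets[split] with a FairseqDataset
        raise NotImplementedError
    @property
    def source_dictionary(self):
        return self.dictionary
    @property
    def target_dictionary(self):
        return self.dictionary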
| 37.217228
| 95
| 0.626749
|
fbcac021fd113c214e4ebe1de69cd0c4407e0bd3
| 1,334
|
py
|
Python
|
test/functional/resendwallettransactions.py
|
ofichain/ofic
|
a320b15f1a0bc8d44fa81346b3ad385c35b39852
|
[
"MIT"
] | null | null | null |
test/functional/resendwallettransactions.py
|
ofichain/ofic
|
a320b15f1a0bc8d44fa81346b3ad385c35b39852
|
[
"MIT"
] | null | null | null |
test/functional/resendwallettransactions.py
|
ofichain/ofic
|
a320b15f1a0bc8d44fa81346b3ad385c35b39852
|
[
"MIT"
] | 1
|
2018-02-28T02:15:20.000Z
|
2018-02-28T02:15:20.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Crowncoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resendwallettransactions RPC."""
from test_framework.test_framework import CrowncoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class ResendWalletTransactionsTest(CrowncoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['--walletbroadcast=false']]
def run_test(self):
# Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled.
assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
        # Should return an empty array if there aren't any unconfirmed wallet transactions.
self.stop_node(0)
self.start_node(0, extra_args=[])
assert_equal(self.nodes[0].resendwallettransactions(), [])
# Should return an array with the unconfirmed wallet transaction.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
assert_equal(self.nodes[0].resendwallettransactions(), [txid])
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
| 44.466667
| 151
| 0.742129
|
0e34cd79961e3500302924651449e87117ab5baf
| 12,064
|
py
|
Python
|
numpy/typing/_callable.py
|
AngelGris/numpy
|
32b564ade7ef22439b5f2b9c11aa4c63f0ecd6fd
|
[
"BSD-3-Clause"
] | 4
|
2021-02-19T19:10:50.000Z
|
2021-02-23T13:27:44.000Z
|
numpy/typing/_callable.py
|
jamiebarker0310/numpy
|
3e2406a51845dbefd696d596fd2e0961b92ed09a
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/typing/_callable.py
|
jamiebarker0310/numpy
|
3e2406a51845dbefd696d596fd2e0961b92ed09a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
A module with various ``typing.Protocol`` subclasses that implement
the ``__call__`` magic method.
See the `Mypy documentation`_ on protocols for more details.
.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
"""
from __future__ import annotations
import sys
from typing import (
Union,
TypeVar,
overload,
Any,
Tuple,
NoReturn,
TYPE_CHECKING,
)
from numpy import (
ndarray,
dtype,
generic,
bool_,
timedelta64,
number,
integer,
unsignedinteger,
signedinteger,
int8,
int_,
floating,
float64,
complexfloating,
complex128,
)
from ._nbit import _NBitInt
from ._scalars import (
_BoolLike_co,
_IntLike_co,
_FloatLike_co,
_ComplexLike_co,
_NumberLike_co,
)
from . import NBitBase
from ._array_like import ArrayLike, _ArrayOrScalar
if sys.version_info >= (3, 8):
from typing import Protocol
HAVE_PROTOCOL = True
else:
try:
from typing_extensions import Protocol
except ImportError:
HAVE_PROTOCOL = False
else:
HAVE_PROTOCOL = True
if TYPE_CHECKING or HAVE_PROTOCOL:
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_2Tuple = Tuple[_T1, _T1]
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)
_IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar("_FloatType", bound=floating)
_NumberType = TypeVar("_NumberType", bound=number)
_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
class _BoolOp(Protocol[_GenericType_co]):
@overload
def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
@overload # platform dependent
def __call__(self, __other: int) -> int_: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(self, __other: _NumberType) -> _NumberType: ...
class _BoolBitOp(Protocol[_GenericType_co]):
@overload
def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
@overload # platform dependent
def __call__(self, __other: int) -> int_: ...
@overload
def __call__(self, __other: _IntType) -> _IntType: ...
class _BoolSub(Protocol):
# Note that `__other: bool_` is absent here
@overload
def __call__(self, __other: bool) -> NoReturn: ...
@overload # platform dependent
def __call__(self, __other: int) -> int_: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(self, __other: _NumberType) -> _NumberType: ...
class _BoolTrueDiv(Protocol):
@overload
def __call__(self, __other: Union[float, _IntLike_co]) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(self, __other: _NumberType) -> _NumberType: ...
class _BoolMod(Protocol):
@overload
def __call__(self, __other: _BoolLike_co) -> int8: ...
@overload # platform dependent
def __call__(self, __other: int) -> int_: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: _IntType) -> _IntType: ...
@overload
def __call__(self, __other: _FloatType) -> _FloatType: ...
class _BoolDivMod(Protocol):
@overload
def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ...
@overload # platform dependent
def __call__(self, __other: int) -> _2Tuple[int_]: ...
@overload
def __call__(self, __other: float) -> _2Tuple[float64]: ...
@overload
def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ...
@overload
def __call__(self, __other: _FloatType) -> _2Tuple[_FloatType]: ...
class _TD64Div(Protocol[_NumberType_co]):
@overload
def __call__(self, __other: timedelta64) -> _NumberType_co: ...
@overload
def __call__(self, __other: _BoolLike_co) -> NoReturn: ...
@overload
def __call__(self, __other: _FloatLike_co) -> timedelta64: ...
class _IntTrueDiv(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> floating[_NBit1]: ...
@overload
def __call__(self, __other: int) -> floating[_NBitInt]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(self, __other: integer[_NBit2]) -> floating[Union[_NBit1, _NBit2]]: ...
class _UnsignedIntOp(Protocol[_NBit1]):
# NOTE: `uint64 + signedinteger -> float64`
@overload
def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(
self, __other: Union[int, signedinteger[Any]]
) -> Union[signedinteger[Any], float64]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(
self, __other: unsignedinteger[_NBit2]
) -> unsignedinteger[Union[_NBit1, _NBit2]]: ...
class _UnsignedIntBitOp(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(self, __other: int) -> signedinteger[Any]: ...
@overload
def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ...
@overload
def __call__(
self, __other: unsignedinteger[_NBit2]
) -> unsignedinteger[Union[_NBit1, _NBit2]]: ...
class _UnsignedIntMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(
self, __other: Union[int, signedinteger[Any]]
) -> Union[signedinteger[Any], float64]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(
self, __other: unsignedinteger[_NBit2]
) -> unsignedinteger[Union[_NBit1, _NBit2]]: ...
class _UnsignedIntDivMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
@overload
def __call__(
self, __other: Union[int, signedinteger[Any]]
) -> Union[_2Tuple[signedinteger[Any]], _2Tuple[float64]]: ...
@overload
def __call__(self, __other: float) -> _2Tuple[float64]: ...
@overload
def __call__(
self, __other: unsignedinteger[_NBit2]
) -> _2Tuple[unsignedinteger[Union[_NBit1, _NBit2]]]: ...
class _SignedIntOp(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(
self, __other: signedinteger[_NBit2]
) -> signedinteger[Union[_NBit1, _NBit2]]: ...
class _SignedIntBitOp(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(
self, __other: signedinteger[_NBit2]
) -> signedinteger[Union[_NBit1, _NBit2]]: ...
class _SignedIntMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(
self, __other: signedinteger[_NBit2]
) -> signedinteger[Union[_NBit1, _NBit2]]: ...
class _SignedIntDivMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
@overload
def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit1, _NBitInt]]]: ...
@overload
def __call__(self, __other: float) -> _2Tuple[float64]: ...
@overload
def __call__(
self, __other: signedinteger[_NBit2]
) -> _2Tuple[signedinteger[Union[_NBit1, _NBit2]]]: ...
class _FloatOp(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> floating[_NBit1]: ...
@overload
def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(self, __other: complex) -> complex128: ...
@overload
def __call__(
self, __other: Union[integer[_NBit2], floating[_NBit2]]
) -> floating[Union[_NBit1, _NBit2]]: ...
class _FloatMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> floating[_NBit1]: ...
@overload
def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(self, __other: float) -> float64: ...
@overload
def __call__(
self, __other: Union[integer[_NBit2], floating[_NBit2]]
) -> floating[Union[_NBit1, _NBit2]]: ...
class _FloatDivMod(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ...
@overload
def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit1, _NBitInt]]]: ...
@overload
def __call__(self, __other: float) -> _2Tuple[float64]: ...
@overload
def __call__(
self, __other: Union[integer[_NBit2], floating[_NBit2]]
) -> _2Tuple[floating[Union[_NBit1, _NBit2]]]: ...
class _ComplexOp(Protocol[_NBit1]):
@overload
def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ...
@overload
def __call__(self, __other: int) -> complexfloating[Union[_NBit1, _NBitInt], Union[_NBit1, _NBitInt]]: ...
@overload
def __call__(self, __other: Union[float, complex]) -> complex128: ...
@overload
def __call__(
self,
__other: Union[
integer[_NBit2],
floating[_NBit2],
complexfloating[_NBit2, _NBit2],
]
) -> complexfloating[Union[_NBit1, _NBit2], Union[_NBit1, _NBit2]]: ...
class _NumberOp(Protocol):
def __call__(self, __other: _NumberLike_co) -> Any: ...
class _ComparisonOp(Protocol[_T1, _T2]):
@overload
def __call__(self, __other: _T1) -> bool_: ...
@overload
def __call__(self, __other: _T2) -> _ArrayOrScalar[bool_]: ...
else:
_BoolOp = Any
_BoolBitOp = Any
_BoolSub = Any
_BoolTrueDiv = Any
_BoolMod = Any
_BoolDivMod = Any
_TD64Div = Any
_IntTrueDiv = Any
_UnsignedIntOp = Any
_UnsignedIntBitOp = Any
_UnsignedIntMod = Any
_UnsignedIntDivMod = Any
_SignedIntOp = Any
_SignedIntBitOp = Any
_SignedIntMod = Any
_SignedIntDivMod = Any
_FloatOp = Any
_FloatMod = Any
_FloatDivMod = Any
_ComplexOp = Any
_NumberOp = Any
_ComparisonOp = Any
| 34.079096
| 114
| 0.604609
|
766a3f6169114c51451d04e5edfeec6645698993
| 1,199
|
py
|
Python
|
raw_data/get_raw_data.py
|
preversewharf45/game_recommendation_system
|
6922d8d8099473057b3c0676092b567b2715ed88
|
[
"MIT"
] | null | null | null |
raw_data/get_raw_data.py
|
preversewharf45/game_recommendation_system
|
6922d8d8099473057b3c0676092b567b2715ed88
|
[
"MIT"
] | null | null | null |
raw_data/get_raw_data.py
|
preversewharf45/game_recommendation_system
|
6922d8d8099473057b3c0676092b567b2715ed88
|
[
"MIT"
] | 1
|
2021-06-08T16:55:41.000Z
|
2021-06-08T16:55:41.000Z
|
import pandas as pd
import glob
import os.path
def get_raw_data( name, script_rel_path = "" ):
file_path = ""
    # Relative path of `get_raw_data.py` script.
if ( script_rel_path != "" ):
file_path = script_rel_path + "/"
# `user_reviews_raw` is handled separately because
    # it's split into multiple parts.
if ( name == "user_reviews" ):
file_path += "user_reviews_raw/*"
if ( os.path.exists( "user_reviews_raw.csv" ) != True ):
new_file = open( "user_reviews_raw.csv", 'w', newline = '', encoding = "utf-8" )
# Iterate through all user_reviews_data files.
for part_file_path in glob.glob( file_path ):
part_file = open( part_file_path, 'r', newline = '', encoding = "utf-8" )
new_file.write( part_file.read() )
part_file.close()
new_file.close()
return pd.read_csv( "user_reviews_raw.csv", delimiter = ',' )
# The rest of raw datas are simply stored in a single file.
if ( name == "app_data" ):
file_path += "app_data_raw/app_data_raw.csv"
elif ( name == "app_list" ):
file_path += "app_list/app_list.csv"
elif ( name == "app_reviews" ):
file_path += "app_reviews_raw/app_reviews_raw.csv"
return pd.read_csv( file_path, delimiter = ',' )
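# Hedged usage sketch (illustrative; the relative path below is an assumption about
# where this script lives, not something the repository guarantees).
if __name__ == "__main__":
    app_df = get_raw_data("app_data", script_rel_path="raw_data")
    reviews_df = get_raw_data("user_reviews", script_rel_path="raw_data")
    print(app_df.shape, reviews_df.shape)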
| 29.975
| 83
| 0.676397
|
3483897e42d3e15b3613bf0635e0724d0616faba
| 36,694
|
py
|
Python
|
rpython/rlib/rmmap.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/rlib/rmmap.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/rlib/rmmap.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 30
|
2018-08-20T03:16:34.000Z
|
2022-01-12T17:39:22.000Z
|
"""Interp-level mmap-like object.
Note that all the methods assume that the mmap is valid (or writable, for
writing methods). You have to call check_valid() from the higher-level API,
as well as maybe check_writeable(). In the case of PyPy, this is done from
pypy/module/mmap/.
"""
from rpython.rtyper.tool import rffi_platform
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import rposix
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib.nonconst import NonConstant
from rpython.rlib.rarithmetic import intmask
import sys
import os
import platform
import stat
_POSIX = os.name == "posix"
_MS_WINDOWS = os.name == "nt"
_64BIT = "64bit" in platform.architecture()[0]
_CYGWIN = "cygwin" == sys.platform
class RMMapError(Exception):
def __init__(self, message):
self.message = message
class RValueError(RMMapError):
pass
class RTypeError(RMMapError):
pass
includes = ["sys/types.h"]
if _POSIX:
includes += ['unistd.h', 'sys/mman.h']
elif _MS_WINDOWS:
includes += ['winsock2.h', 'windows.h']
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
includes=includes,
#pre_include_bits=['#ifndef _GNU_SOURCE\n' +
# '#define _GNU_SOURCE\n' +
# '#endif']
# ^^^ _GNU_SOURCE is always defined by the ExternalCompilationInfo now
)
size_t = rffi_platform.SimpleType("size_t", rffi.LONG)
off_t = rffi_platform.SimpleType("off_t", rffi.LONG)
constants = {}
if _POSIX:
# constants, look in sys/mman.h and platform docs for the meaning
# some constants are linux only so they will be correctly exposed outside
# depending on the OS
constant_names = ['MAP_SHARED', 'MAP_PRIVATE', 'MAP_FIXED',
'PROT_READ', 'PROT_WRITE',
'MS_SYNC']
opt_constant_names = ['MAP_ANON', 'MAP_ANONYMOUS', 'MAP_NORESERVE',
'PROT_EXEC',
'MAP_DENYWRITE', 'MAP_EXECUTABLE']
for name in constant_names:
setattr(CConfig, name, rffi_platform.ConstantInteger(name))
for name in opt_constant_names:
setattr(CConfig, name, rffi_platform.DefinedConstantInteger(name))
CConfig.MREMAP_MAYMOVE = (
rffi_platform.DefinedConstantInteger("MREMAP_MAYMOVE"))
CConfig.has_mremap = rffi_platform.Has('mremap(NULL, 0, 0, 0)')
CConfig.has_madvise = rffi_platform.Has('madvise(NULL, 0, 0)')
# ^^ both are a dirty hack, this is probably a macro
CConfig.MADV_DONTNEED = (
rffi_platform.DefinedConstantInteger('MADV_DONTNEED'))
CConfig.MADV_FREE = (
rffi_platform.DefinedConstantInteger('MADV_FREE'))
elif _MS_WINDOWS:
constant_names = ['PAGE_READONLY', 'PAGE_READWRITE', 'PAGE_WRITECOPY',
'FILE_MAP_READ', 'FILE_MAP_WRITE', 'FILE_MAP_COPY',
'DUPLICATE_SAME_ACCESS', 'MEM_COMMIT', 'MEM_RESERVE',
'MEM_RELEASE', 'PAGE_EXECUTE_READWRITE', 'PAGE_NOACCESS',
'MEM_RESET']
for name in constant_names:
setattr(CConfig, name, rffi_platform.ConstantInteger(name))
from rpython.rlib import rwin32
from rpython.rlib.rwin32 import HANDLE, LPHANDLE
from rpython.rlib.rwin32 import NULL_HANDLE, INVALID_HANDLE_VALUE
from rpython.rlib.rwin32 import DWORD, WORD, DWORD_PTR, LPDWORD
from rpython.rlib.rwin32 import BOOL, LPVOID, LPCSTR, SIZE_T
from rpython.rlib.rwin32 import LONG, PLONG
# export the constants inside and outside. see __init__.py
cConfig = rffi_platform.configure(CConfig)
constants.update(cConfig)
if _POSIX:
# MAP_ANONYMOUS is not always present but it's always available at CPython level
if constants["MAP_ANONYMOUS"] is None:
constants["MAP_ANONYMOUS"] = constants["MAP_ANON"]
assert constants["MAP_ANONYMOUS"] is not None
constants["MAP_ANON"] = constants["MAP_ANONYMOUS"]
locals().update(constants)
_ACCESS_DEFAULT, ACCESS_READ, ACCESS_WRITE, ACCESS_COPY = range(4)
if rffi.sizeof(off_t) > rffi.sizeof(lltype.Signed):
HAVE_LARGEFILE_SUPPORT = True
else:
HAVE_LARGEFILE_SUPPORT = False
def external(name, args, result, save_err_on_unsafe=0, save_err_on_safe=0,
**kwargs):
unsafe = rffi.llexternal(name, args, result,
compilation_info=CConfig._compilation_info_,
save_err=save_err_on_unsafe,
**kwargs)
safe = rffi.llexternal(name, args, result,
compilation_info=CConfig._compilation_info_,
sandboxsafe=True, releasegil=False,
save_err=save_err_on_safe,
**kwargs)
return unsafe, safe
def winexternal(name, args, result, **kwargs):
unsafe = rffi.llexternal(name, args, result,
compilation_info=CConfig._compilation_info_,
calling_conv='win',
**kwargs)
safe = rffi.llexternal(name, args, result,
compilation_info=CConfig._compilation_info_,
calling_conv='win',
sandboxsafe=True, releasegil=False,
**kwargs)
return unsafe, safe
PTR = rffi.CCHARP
if _CYGWIN:
# XXX: macro=True hack for newer versions of Cygwin (as of 12/2012)
_, c_malloc_safe = external('malloc', [size_t], PTR, macro=True)
_, c_free_safe = external('free', [PTR], lltype.Void, macro=True)
c_memmove, _ = external('memmove', [PTR, PTR, size_t], lltype.Void)
if _POSIX:
has_mremap = cConfig['has_mremap']
has_madvise = cConfig['has_madvise']
c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT,
rffi.INT, off_t], PTR, macro=True,
save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO)
# 'mmap' on linux32 is a macro that calls 'mmap64'
_, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT)
c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT,
save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO)
if has_mremap:
c_mremap, _ = external('mremap',
[PTR, size_t, size_t, rffi.ULONG], PTR)
if has_madvise:
_, c_madvise_safe = external('madvise', [PTR, size_t, rffi.INT],
rffi.INT, _nowrapper=True)
# this one is always safe
_pagesize = rffi_platform.getintegerfunctionresult('getpagesize',
includes=includes)
_get_allocation_granularity = _get_page_size = lambda: _pagesize
elif _MS_WINDOWS:
class ComplexCConfig:
_compilation_info_ = CConfig._compilation_info_
SYSINFO_STRUCT = rffi.CStruct(
'SYSINFO_STRUCT',
("wProcessorArchitecture", WORD),
("wReserved", WORD),
)
SYSINFO_UNION = rffi.CStruct(
'union SYSINFO_UNION',
("dwOemId", DWORD),
("_struct_", SYSINFO_STRUCT),
)
# sorry, I can't find a way to insert the above
# because the union field has no name
SYSTEM_INFO = rffi_platform.Struct(
'SYSTEM_INFO', [
## ("_union_", SYSINFO_UNION),
## instead, we put the smaller fields, here
("wProcessorArchitecture", WORD),
("wReserved", WORD),
## should be a union. dwOemId is obsolete, anyway
("dwPageSize", DWORD),
("lpMinimumApplicationAddress", LPVOID),
("lpMaximumApplicationAddress", LPVOID),
("dwActiveProcessorMask", DWORD_PTR),
("dwNumberOfProcessors", DWORD),
("dwProcessorType", DWORD),
("dwAllocationGranularity", DWORD),
("wProcessorLevel", WORD),
("wProcessorRevision", WORD),
])
config = rffi_platform.configure(ComplexCConfig)
SYSTEM_INFO = config['SYSTEM_INFO']
SYSTEM_INFO_P = lltype.Ptr(SYSTEM_INFO)
GetSystemInfo, _ = winexternal('GetSystemInfo', [SYSTEM_INFO_P], lltype.Void)
GetFileSize, _ = winexternal('GetFileSize', [HANDLE, LPDWORD], DWORD,
save_err=rffi.RFFI_SAVE_LASTERROR)
GetCurrentProcess, _ = winexternal('GetCurrentProcess', [], HANDLE)
DuplicateHandle, _ = winexternal('DuplicateHandle',
[HANDLE, HANDLE, HANDLE, LPHANDLE, DWORD,
BOOL, DWORD], BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
CreateFileMapping, _ = winexternal('CreateFileMappingA',
[HANDLE, rwin32.LPSECURITY_ATTRIBUTES,
DWORD, DWORD, DWORD, LPCSTR], HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
MapViewOfFile, _ = winexternal('MapViewOfFile', [HANDLE, DWORD, DWORD,
DWORD, SIZE_T], LPCSTR,
save_err=rffi.RFFI_SAVE_LASTERROR) ##!!LPVOID
_, UnmapViewOfFile_safe = winexternal('UnmapViewOfFile', [LPCSTR], BOOL)
FlushViewOfFile, _ = winexternal('FlushViewOfFile', [LPCSTR, SIZE_T], BOOL)
SetFilePointer, _ = winexternal('SetFilePointer', [HANDLE, LONG, PLONG, DWORD], DWORD)
SetEndOfFile, _ = winexternal('SetEndOfFile', [HANDLE], BOOL)
VirtualAlloc, VirtualAlloc_safe = winexternal('VirtualAlloc',
[rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD],
rffi.VOIDP)
_, _VirtualAlloc_safe_no_wrapper = winexternal('VirtualAlloc',
[rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD],
rffi.VOIDP, _nowrapper=True)
_, _VirtualProtect_safe = winexternal('VirtualProtect',
[rffi.VOIDP, rffi.SIZE_T, DWORD, LPDWORD],
BOOL)
@specialize.ll()
def VirtualProtect(addr, size, mode, oldmode_ptr):
return _VirtualProtect_safe(addr,
rffi.cast(rffi.SIZE_T, size),
rffi.cast(DWORD, mode),
oldmode_ptr)
VirtualFree, VirtualFree_safe = winexternal('VirtualFree',
[rffi.VOIDP, rffi.SIZE_T, DWORD], BOOL)
def _get_page_size():
try:
si = rffi.make(SYSTEM_INFO)
GetSystemInfo(si)
return int(si.c_dwPageSize)
finally:
lltype.free(si, flavor="raw")
def _get_allocation_granularity():
try:
si = rffi.make(SYSTEM_INFO)
GetSystemInfo(si)
return int(si.c_dwAllocationGranularity)
finally:
lltype.free(si, flavor="raw")
def _get_file_size(handle):
# XXX use native Windows types like WORD
high_ref = lltype.malloc(LPDWORD.TO, 1, flavor='raw')
try:
low = GetFileSize(handle, high_ref)
low = rffi.cast(lltype.Signed, low)
        # XXX should we propagate the real type, allowing
# for 2*sys.maxint?
high = high_ref[0]
high = rffi.cast(lltype.Signed, high)
# low might just happen to have the value INVALID_FILE_SIZE
# so we need to check the last error also
INVALID_FILE_SIZE = -1
if low == INVALID_FILE_SIZE:
err = rwin32.GetLastError_saved()
if err:
raise WindowsError(err, "mmap")
return low, high
finally:
lltype.free(high_ref, flavor='raw')
INVALID_HANDLE = INVALID_HANDLE_VALUE
PAGESIZE = _get_page_size()
ALLOCATIONGRANULARITY = _get_allocation_granularity()
NULL = lltype.nullptr(PTR.TO)
NODATA = lltype.nullptr(PTR.TO)
class MMap(object):
def __init__(self, access, offset):
self.size = 0
self.pos = 0
self.access = access
self.offset = offset
if _MS_WINDOWS:
self.map_handle = NULL_HANDLE
self.file_handle = NULL_HANDLE
self.tagname = ""
elif _POSIX:
self.fd = -1
self.closed = False
def check_valid(self):
if _MS_WINDOWS:
to_close = self.map_handle == INVALID_HANDLE
elif _POSIX:
to_close = self.closed
if to_close:
raise RValueError("map closed or invalid")
def check_writeable(self):
if not (self.access != ACCESS_READ):
raise RTypeError("mmap can't modify a readonly memory map.")
def check_resizeable(self):
if not (self.access == ACCESS_WRITE or self.access == _ACCESS_DEFAULT):
raise RTypeError("mmap can't resize a readonly or copy-on-write memory map.")
def setdata(self, data, size):
"""Set the internal data and map size from a PTR."""
assert size >= 0
self.data = data
self.size = size
def unmap(self):
if _MS_WINDOWS:
UnmapViewOfFile_safe(self.getptr(0))
elif _POSIX:
self.unmap_range(0, self.size)
if _POSIX:
def unmap_range(self, offset, size):
"""Unmap (a portion of) the mapped range.
Per munmap(1), the offset must be a multiple of the page size,
and the size will be rounded up to a multiple of the page size.
"""
c_munmap_safe(self.getptr(offset), size)
def close(self):
if _MS_WINDOWS:
if self.size > 0:
self.unmap()
self.setdata(NODATA, 0)
if self.map_handle != INVALID_HANDLE:
rwin32.CloseHandle_no_err(self.map_handle)
self.map_handle = INVALID_HANDLE
if self.file_handle != INVALID_HANDLE:
rwin32.CloseHandle_no_err(self.file_handle)
self.file_handle = INVALID_HANDLE
elif _POSIX:
self.closed = True
if self.fd != -1:
# XXX this is buggy - raising in an RPython del is not a good
# idea, we should swallow the exception or ignore the
                # underlying close error code
os.close(self.fd)
self.fd = -1
if self.size > 0:
self.unmap()
self.setdata(NODATA, 0)
def __del__(self):
self.close()
def read_byte(self):
if self.pos < self.size:
value = self.data[self.pos]
self.pos += 1
return value
else:
raise RValueError("read byte out of range")
def readline(self):
data = self.data
for pos in xrange(self.pos, self.size):
if data[pos] == '\n':
eol = pos + 1 # we're interested in the position after new line
break
else: # no '\n' found
eol = self.size
res = self.getslice(self.pos, eol - self.pos)
self.pos += len(res)
return res
def read(self, num=-1):
if num < 0:
# read all
eol = self.size
else:
eol = self.pos + num
# silently adjust out of range requests
if eol > self.size:
eol = self.size
res = self.getslice(self.pos, eol - self.pos)
self.pos += len(res)
return res
def find(self, tofind, start, end, reverse=False):
# XXX naive! how can we reuse the rstr algorithm?
if start < 0:
start += self.size
if start < 0:
start = 0
if end < 0:
end += self.size
if end < 0:
end = 0
elif end > self.size:
end = self.size
#
upto = end - len(tofind)
if not reverse:
step = 1
p = start
if p > upto:
return -1 # failure (empty range to search)
else:
step = -1
p = upto
upto = start
if p < upto:
return -1 # failure (empty range to search)
#
data = self.data
while True:
assert p >= 0
for q in range(len(tofind)):
if data[p+q] != tofind[q]:
break # position 'p' is not a match
else:
# full match
return p
#
if p == upto:
return -1 # failure
p += step
def seek(self, pos, whence=0):
dist = pos
how = whence
if how == 0: # relative to start
where = dist
elif how == 1: # relative to current position
where = self.pos + dist
elif how == 2: # relative to the end
where = self.size + dist
else:
raise RValueError("unknown seek type")
if not (0 <= where <= self.size):
raise RValueError("seek out of range")
self.pos = intmask(where)
def tell(self):
return self.pos
def file_size(self):
size = self.size
if _MS_WINDOWS:
if self.file_handle != INVALID_HANDLE:
low, high = _get_file_size(self.file_handle)
if not high and low <= sys.maxint:
return low
# not so sure if the signed/unsigned strictness is a good idea:
high = rffi.cast(lltype.Unsigned, high)
low = rffi.cast(lltype.Unsigned, low)
size = (high << 32) + low
size = rffi.cast(lltype.Signed, size)
elif _POSIX:
st = os.fstat(self.fd)
size = st[stat.ST_SIZE]
return size
def write(self, data):
data_len = len(data)
start = self.pos
if start + data_len > self.size:
raise RValueError("data out of range")
self.setslice(start, data)
self.pos = start + data_len
return data_len
def write_byte(self, byte):
if len(byte) != 1:
raise RTypeError("write_byte() argument must be char")
if self.pos >= self.size:
raise RValueError("write byte out of range")
self.data[self.pos] = byte[0]
self.pos += 1
def getptr(self, offset):
return rffi.ptradd(self.data, offset)
def getslice(self, start, length):
if length < 0:
return ''
return rffi.charpsize2str(self.getptr(start), length)
def setslice(self, start, newdata):
internaldata = self.data
for i in range(len(newdata)):
internaldata[start+i] = newdata[i]
def flush(self, offset=0, size=0):
if size == 0:
size = self.size
if offset < 0 or size < 0 or offset + size > self.size:
raise RValueError("flush values out of range")
else:
start = self.getptr(offset)
if _MS_WINDOWS:
res = FlushViewOfFile(start, size)
# XXX res == 0 means that an error occurred, but in CPython
# this is not checked
return res
elif _POSIX:
res = c_msync(start, size, MS_SYNC)
if res == -1:
errno = rposix.get_saved_errno()
raise OSError(errno, os.strerror(errno))
return 0
def move(self, dest, src, count):
# check boundings
if (src < 0 or dest < 0 or count < 0 or
src + count > self.size or dest + count > self.size):
raise RValueError("source or destination out of range")
datasrc = self.getptr(src)
datadest = self.getptr(dest)
c_memmove(datadest, datasrc, count)
def resize(self, newsize):
if _POSIX:
if not has_mremap:
raise RValueError("mmap: resizing not available--no mremap()")
# resize the underlying file first, if there is one
if self.fd >= 0:
os.ftruncate(self.fd, self.offset + newsize)
# now resize the mmap
newdata = c_mremap(self.getptr(0), self.size, newsize,
MREMAP_MAYMOVE or 0)
self.setdata(newdata, newsize)
elif _MS_WINDOWS:
# disconnect the mapping
self.unmap()
rwin32.CloseHandle_no_err(self.map_handle)
# move to the desired EOF position
if _64BIT:
newsize_high = (self.offset + newsize) >> 32
newsize_low = (self.offset + newsize) & 0xFFFFFFFF
offset_high = self.offset >> 32
offset_low = self.offset & 0xFFFFFFFF
else:
newsize_high = 0
newsize_low = self.offset + newsize
offset_high = 0
offset_low = self.offset
FILE_BEGIN = 0
high_ref = lltype.malloc(PLONG.TO, 1, flavor='raw')
try:
high_ref[0] = rffi.cast(LONG, newsize_high)
SetFilePointer(self.file_handle, newsize_low, high_ref,
FILE_BEGIN)
finally:
lltype.free(high_ref, flavor='raw')
# resize the file
SetEndOfFile(self.file_handle)
# create another mapping object and remap the file view
res = CreateFileMapping(self.file_handle, NULL, PAGE_READWRITE,
newsize_high, newsize_low, self.tagname)
self.map_handle = res
if self.map_handle:
data = MapViewOfFile(self.map_handle, FILE_MAP_WRITE,
offset_high, offset_low, newsize)
if data:
# XXX we should have a real LPVOID which must always be casted
charp = rffi.cast(LPCSTR, data)
self.setdata(charp, newsize)
return
winerror = rwin32.lastSavedWindowsError()
if self.map_handle:
rwin32.CloseHandle_no_err(self.map_handle)
self.map_handle = INVALID_HANDLE
raise winerror
def len(self):
return self.size
def getitem(self, index):
# simplified version, for rpython
self.check_valid()
if index < 0:
index += self.size
return self.data[index]
def setitem(self, index, value):
if len(value) != 1:
raise RValueError("mmap assignment must be "
"single-character string")
if index < 0:
index += self.size
self.data[index] = value[0]
def _check_map_size(size):
if size < 0:
raise RTypeError("memory mapped size must be positive")
if _POSIX:
def mmap(fileno, length, flags=MAP_SHARED,
prot=PROT_WRITE | PROT_READ, access=_ACCESS_DEFAULT, offset=0):
fd = fileno
# check access is not there when flags and prot are there
if access != _ACCESS_DEFAULT and ((flags != MAP_SHARED) or
(prot != (PROT_WRITE | PROT_READ))):
raise RValueError("mmap can't specify both access and flags, prot.")
# check size boundaries
_check_map_size(length)
map_size = length
if offset < 0:
raise RValueError("negative offset")
if access == ACCESS_READ:
flags = MAP_SHARED
prot = PROT_READ
elif access == ACCESS_WRITE:
flags = MAP_SHARED
prot = PROT_READ | PROT_WRITE
elif access == ACCESS_COPY:
flags = MAP_PRIVATE
prot = PROT_READ | PROT_WRITE
elif access == _ACCESS_DEFAULT:
# map prot to access type
if prot & PROT_READ and prot & PROT_WRITE:
pass # _ACCESS_DEFAULT
elif prot & PROT_WRITE:
access = ACCESS_WRITE
else:
access = ACCESS_READ
else:
raise RValueError("mmap invalid access parameter.")
# check file size
try:
st = os.fstat(fd)
except OSError:
pass # ignore errors and trust map_size
else:
mode = st[stat.ST_MODE]
size = st[stat.ST_SIZE]
if stat.S_ISREG(mode):
if map_size == 0:
if size == 0:
raise RValueError("cannot mmap an empty file")
if offset > size:
raise RValueError(
"mmap offset is greater than file size")
map_size = int(size - offset)
if map_size != size - offset:
raise RValueError("mmap length is too large")
elif offset + map_size > size:
raise RValueError("mmap length is greater than file size")
m = MMap(access, offset)
if fd == -1:
# Assume the caller wants to map anonymous memory.
# This is the same behaviour as Windows. mmap.mmap(-1, size)
            # on both Windows and Unix maps anonymous memory.
m.fd = -1
flags |= MAP_ANONYMOUS
else:
m.fd = os.dup(fd)
# XXX if we use hintp below in alloc, the NonConstant
# is necessary since we want a general version of c_mmap
# to be annotated with a non-constant pointer.
res = c_mmap(NonConstant(NULL), map_size, prot, flags, fd, offset)
if res == rffi.cast(PTR, -1):
errno = rposix.get_saved_errno()
raise OSError(errno, os.strerror(errno))
m.setdata(res, map_size)
return m
def alloc_hinted(hintp, map_size):
flags = MAP_PRIVATE | MAP_ANONYMOUS
prot = PROT_EXEC | PROT_READ | PROT_WRITE
if we_are_translated():
flags = NonConstant(flags)
prot = NonConstant(prot)
return c_mmap_safe(hintp, map_size, prot, flags, -1, 0)
def clear_large_memory_chunk_aligned(addr, map_size):
addr = rffi.cast(PTR, addr)
flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS
prot = PROT_READ | PROT_WRITE
if we_are_translated():
flags = NonConstant(flags)
prot = NonConstant(prot)
res = c_mmap_safe(addr, map_size, prot, flags, -1, 0)
return res == addr
# XXX is this really necessary?
class Hint:
pos = -0x4fff0000 # for reproducible results
hint = Hint()
def alloc(map_size):
"""Allocate memory. This is intended to be used by the JIT,
so the memory has the executable bit set and gets allocated
internally in case of a sandboxed process.
"""
from errno import ENOMEM
from rpython.rlib import debug
if _CYGWIN:
# XXX: JIT memory should be using mmap MAP_PRIVATE with
# PROT_EXEC but Cygwin's fork() fails. mprotect()
# cannot be used, but seems to be unnecessary there.
res = c_malloc_safe(map_size)
if res == rffi.cast(PTR, 0):
raise MemoryError
return res
res = alloc_hinted(rffi.cast(PTR, hint.pos), map_size)
if res == rffi.cast(PTR, -1):
# some systems (some versions of OS/X?) complain if they
# are passed a non-zero address. Try again.
res = alloc_hinted(rffi.cast(PTR, 0), map_size)
if res == rffi.cast(PTR, -1):
# ENOMEM simply raises MemoryError, but other errors are fatal
if rposix.get_saved_errno() != ENOMEM:
debug.fatalerror_notb(
"Got an unexpected error trying to allocate some "
"memory for the JIT (tried to do mmap() with "
"PROT_EXEC|PROT_READ|PROT_WRITE). This can be caused "
"by a system policy like PAX. You need to find how "
"to work around the policy on your system.")
raise MemoryError
else:
hint.pos += map_size
return res
alloc._annenforceargs_ = (int,)
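    # Illustrative pairing (an assumption, not exercised in this module): a
    # JIT backend would typically request an executable block, emit machine
    # code into it, and release it later, e.g.
    #     block = alloc(PAGESIZE)      # RWX memory
    #     ...copy machine code into block...
    #     free(block, PAGESIZE)        # `free` is bound a few lines below
    # where `free` ends up being c_munmap_safe (or c_free_safe on Cygwin).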
if _CYGWIN:
free = c_free_safe
else:
free = c_munmap_safe
if sys.platform.startswith('linux'):
assert has_madvise
assert MADV_DONTNEED is not None
if MADV_FREE is None:
MADV_FREE = 8 # from the kernel sources of Linux >= 4.5
class CanUseMadvFree:
ok = -1
can_use_madv_free = CanUseMadvFree()
def madvise_free(addr, map_size):
# We don't know if we are running on a recent enough kernel
# that supports MADV_FREE. Check that at runtime: if the
# first call to madvise(MADV_FREE) fails, we assume it's
# because of EINVAL and we fall back to MADV_DONTNEED.
if can_use_madv_free.ok != 0:
res = c_madvise_safe(rffi.cast(PTR, addr),
rffi.cast(size_t, map_size),
rffi.cast(rffi.INT, MADV_FREE))
if can_use_madv_free.ok == -1:
can_use_madv_free.ok = (rffi.cast(lltype.Signed, res) == 0)
if can_use_madv_free.ok == 0:
c_madvise_safe(rffi.cast(PTR, addr),
rffi.cast(size_t, map_size),
rffi.cast(rffi.INT, MADV_DONTNEED))
elif has_madvise and not (MADV_FREE is MADV_DONTNEED is None):
use_flag = MADV_FREE if MADV_FREE is not None else MADV_DONTNEED
def madvise_free(addr, map_size):
c_madvise_safe(rffi.cast(PTR, addr),
rffi.cast(size_t, map_size),
rffi.cast(rffi.INT, use_flag))
else:
def madvise_free(addr, map_size):
"No madvise() on this platform"
elif _MS_WINDOWS:
def mmap(fileno, length, tagname="", access=_ACCESS_DEFAULT, offset=0):
# XXX flags is or-ed into access by now.
flags = 0
# check size boundaries
_check_map_size(length)
map_size = length
if offset < 0:
raise RValueError("negative offset")
flProtect = 0
dwDesiredAccess = 0
fh = NULL_HANDLE
if access == ACCESS_READ:
flProtect = PAGE_READONLY
dwDesiredAccess = FILE_MAP_READ
elif access == _ACCESS_DEFAULT or access == ACCESS_WRITE:
flProtect = PAGE_READWRITE
dwDesiredAccess = FILE_MAP_WRITE
elif access == ACCESS_COPY:
flProtect = PAGE_WRITECOPY
dwDesiredAccess = FILE_MAP_COPY
else:
raise RValueError("mmap invalid access parameter.")
# assume -1 and 0 both mean invalid file descriptor
# to 'anonymously' map memory.
if fileno != -1 and fileno != 0:
fh = rwin32.get_osfhandle(fileno)
# Win9x appears to need us seeked to zero
# SEEK_SET = 0
# libc._lseek(fileno, 0, SEEK_SET)
# check file size
try:
low, high = _get_file_size(fh)
except OSError:
pass # ignore non-seeking files and errors and trust map_size
else:
if not high and low <= sys.maxint:
size = low
else:
# not so sure if the signed/unsigned strictness is a good idea:
high = rffi.cast(lltype.Unsigned, high)
low = rffi.cast(lltype.Unsigned, low)
size = (high << 32) + low
size = rffi.cast(lltype.Signed, size)
if map_size == 0:
if size == 0:
raise RValueError("cannot mmap an empty file")
if offset > size:
raise RValueError(
"mmap offset is greater than file size")
map_size = int(size - offset)
if map_size != size - offset:
raise RValueError("mmap length is too large")
elif offset + map_size > size:
raise RValueError("mmap length is greater than file size")
m = MMap(access, offset)
m.file_handle = INVALID_HANDLE
m.map_handle = INVALID_HANDLE
if fh:
# it is necessary to duplicate the handle, so the
# Python code can close it on us
handle_ref = lltype.malloc(LPHANDLE.TO, 1, flavor='raw')
handle_ref[0] = m.file_handle
try:
res = DuplicateHandle(GetCurrentProcess(), # source process handle
fh, # handle to be duplicated
GetCurrentProcess(), # target process handle
handle_ref, # result
0, # access - ignored due to options value
False, # inherited by child procs?
DUPLICATE_SAME_ACCESS) # options
if not res:
raise rwin32.lastSavedWindowsError()
m.file_handle = handle_ref[0]
finally:
lltype.free(handle_ref, flavor='raw')
if not map_size:
low, high = _get_file_size(fh)
if _64BIT:
map_size = (low << 32) + 1
else:
if high:
# file is too large to map completely
map_size = -1
else:
map_size = low
if tagname:
m.tagname = tagname
        # DWORD is a 4-byte int; values wider than 4 bytes must be split into high/low halves
if _64BIT:
size_hi = (map_size + offset) >> 32
size_lo = (map_size + offset) & 0xFFFFFFFF
offset_hi = offset >> 32
offset_lo = offset & 0xFFFFFFFF
else:
size_hi = 0
size_lo = map_size + offset
offset_hi = 0
offset_lo = offset
flProtect |= flags
m.map_handle = CreateFileMapping(m.file_handle, NULL, flProtect,
size_hi, size_lo, m.tagname)
if m.map_handle:
data = MapViewOfFile(m.map_handle, dwDesiredAccess,
offset_hi, offset_lo, length)
if data:
# XXX we should have a real LPVOID which must always be casted
charp = rffi.cast(LPCSTR, data)
m.setdata(charp, map_size)
return m
winerror = rwin32.lastSavedWindowsError()
if m.map_handle:
rwin32.CloseHandle_no_err(m.map_handle)
m.map_handle = INVALID_HANDLE
raise winerror
class Hint:
pos = -0x4fff0000 # for reproducible results
hint = Hint()
# XXX this has no effect on windows
def alloc(map_size):
"""Allocate memory. This is intended to be used by the JIT,
so the memory has the executable bit set.
XXX implement me: it should get allocated internally in
case of a sandboxed process
"""
null = lltype.nullptr(rffi.VOIDP.TO)
res = VirtualAlloc_safe(null, map_size, MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE)
if not res:
raise MemoryError
arg = lltype.malloc(LPDWORD.TO, 1, zero=True, flavor='raw')
VirtualProtect(res, map_size, PAGE_EXECUTE_READWRITE, arg)
lltype.free(arg, flavor='raw')
# ignore errors, just try
return res
alloc._annenforceargs_ = (int,)
def free(ptr, map_size):
VirtualFree_safe(ptr, 0, MEM_RELEASE)
def madvise_free(addr, map_size):
r = _VirtualAlloc_safe_no_wrapper(
rffi.cast(rffi.VOIDP, addr),
rffi.cast(rffi.SIZE_T, map_size),
rffi.cast(DWORD, MEM_RESET),
rffi.cast(DWORD, PAGE_READWRITE))
#from rpython.rlib import debug
#debug.debug_print("madvise_free:", r)
| 37.867905 | 90 | 0.551017 |
4ea093c247066f0a43c2990302bdb26c81a6c721 | 3,505 | py | Python | unet/train.py | dylanv/unet | 31f0deac92c89fff9a86573439235efc09fb1b37 | ["MIT"] | null | null | null | unet/train.py | dylanv/unet | 31f0deac92c89fff9a86573439235efc09fb1b37 | ["MIT"] | null | null | null | unet/train.py | dylanv/unet | 31f0deac92c89fff9a86573439235efc09fb1b37 | ["MIT"] | null | null | null |
import os
from pathlib import Path
import albumentations as A
import cv2
import matplotlib.pyplot as plt
import torch
from albumentations.pytorch import ToTensorV2
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.metrics import Accuracy, Loss
from segmentation_models_pytorch.losses import DiceLoss
from torch.optim import SGD
from torch.utils.data import DataLoader, Dataset
from unet.model import Unet
dataset = Path("/home/dylan/Dropbox/Projects/datasets/aerial_segmentation/dataset/semantic_drone_dataset")
imgs = dataset / "original_images"
masks = dataset / "label_images_semantic"
class SemanticDroneDataset(Dataset):
def __init__(self, images_path, masks_path):
super().__init__()
self.images_path, self.masks_path = images_path, masks_path
self.image_names = self._get_matched_images()
self.output_transform = A.Compose(
[
A.RandomCrop(height=256, width=256, always_apply=True),
# A.Resize(height=256, width=256, always_apply=True),
A.ToFloat(always_apply=True),
ToTensorV2(always_apply=True),
],
)
def _get_matched_images(self):
matched_image_names = []
for img_name in self.images_path.glob(f"*.jpg"):
label_name = img_name.with_suffix(".png").name
labels = list(self.masks_path.glob(label_name))
if len(labels) == 1:
matched_image_names.append(img_name.stem)
return matched_image_names
def __len__(self):
return len(self.image_names)
def __getitem__(self, item):
filename = self.image_names[item]
img_path, mask_path = self.images_path / filename, self.masks_path / filename
img = cv2.imread(str(img_path.with_suffix(".jpg")), cv2.IMREAD_COLOR)
mask = cv2.imread(str(mask_path.with_suffix(".png")), cv2.IMREAD_GRAYSCALE)
aug = self.output_transform(image=img, mask=mask)
return aug["image"], aug["mask"].to(torch.int64)
def show_plot(self, item):
img, mask = self[0]
img, mask = img.numpy(), mask.numpy()
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.imshow(img)
ax2.imshow(mask)
plt.tight_layout()
plt.show()
train_dataset = SemanticDroneDataset(imgs, masks)
img, mask = train_dataset[0]
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=os.cpu_count())
model = Unet(3, 24, attention=True)
criterion = DiceLoss("multiclass")
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=0.001, momentum=0.1)
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy(), "loss": Loss(criterion)}, device=device)
GpuInfo().attach(trainer, name="gpu")
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names="all")
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_loss = metrics["loss"]
pbar.log_message(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_loss:.2f}"
)
trainer.run(train_loader, max_epochs=3)
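# A possible follow-up once training finishes (illustrative; the original
# script stops here and does not checkpoint): persist the learned weights with
#     torch.save(model.state_dict(), "unet_drone.pt")
# and restore them later via model.load_state_dict(torch.load("unet_drone.pt")).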
| 34.362745 | 120 | 0.701569 |
a03e154ecd6d6f397f2e95cc2c55d3f3a48cfdbe | 1,733 | py | Python | src/test/dir_stats_test.py | ferzerkerx/git-greenseer | 5e5a054a366ac6295a0d7b977bc44f7923da7291 | ["MIT"] | 3 | 2015-10-02T19:37:44.000Z | 2016-11-01T19:06:13.000Z | src/test/dir_stats_test.py | ferzerkerx/git-greenseer | 5e5a054a366ac6295a0d7b977bc44f7923da7291 | ["MIT"] | null | null | null | src/test/dir_stats_test.py | ferzerkerx/git-greenseer | 5e5a054a366ac6295a0d7b977bc44f7923da7291 | ["MIT"] | null | null | null |
__author__ = 'fmontes'
import unittest
from main.contribution_stats import ContributionStats
from main.dir_stats import DirStats
class DirStatsTest(unittest.TestCase):
def test_add_file_contributions(self):
dir_name = '/someDir'
dir_stats = DirStats(dir_name)
dir_stats.add_file_contributions('some_file1', self.create_file_contributions())
dir_stats.add_file_contributions('some_file2', self.create_file_contributions())
dir_stats.add_file_contributions('some_file3', self.create_file_contributions())
dir_stats.add_file_contributions('some_file4', self.create_file_contributions())
self.assertEqual(len(dir_stats.sorted_contributions), 3)
first_contribution = dir_stats.sorted_contributions[0]
name = first_contribution[0]
contributions = first_contribution[1]
self.assertEqual(name, 'foo_name3')
self.assertAlmostEqual(contributions.average(1000), 40, 3)
def create_file_contributions(self):
contributor_name1 = 'foo_name1'
contributor_name2 = 'foo_name2'
contributor_name3 = 'foo_name3'
file_contributions = [self.create_contribution(contributor_name1),
self.create_contribution(contributor_name2),
self.create_contribution(contributor_name3)]
return file_contributions
@staticmethod
def create_contribution(contributor_name, line_count=100, total_lines_to_consider=1050.0):
contributor_stats = ContributionStats(contributor_name, line_count)
contributor_stats.total_lines_to_consider = total_lines_to_consider
return contributor_stats
if __name__ == '__main__':
unittest.main()
| 39.386364 | 94 | 0.729371 |
c370fd8730be8a57bf73a3c93883bf0055a37580 | 6,630 | py | Python | cogs_hidden/logs.py | Saizuo/EpicBot | 45cebe79b7bf90c0a067162fb3646a176cb6394a | ["BSD-3-Clause"] | 3 | 2021-09-13T11:28:49.000Z | 2022-02-21T15:16:35.000Z | cogs_hidden/logs.py | Saizuo/EpicBot | 45cebe79b7bf90c0a067162fb3646a176cb6394a | ["BSD-3-Clause"] | 1 | 2021-09-27T07:45:29.000Z | 2021-09-27T07:45:29.000Z | cogs_hidden/logs.py | Saizuo/EpicBot | 45cebe79b7bf90c0a067162fb3646a176cb6394a | ["BSD-3-Clause"] | null | null | null |
"""
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import discord
from discord.ext import commands
from utils.embed import success_embed, error_embed
from utils.bot import EpicBot
from config import (
COOLDOWN_BYPASS, EMOJIS, OWNERS,
PREFIX, MAIN_COLOR, EMPTY_CHARACTER, WEBSITE_LINK,
SUPPORT_SERVER_LINK
)
class Logs(commands.Cog):
def __init__(self, client: EpicBot):
self.client = client
@commands.Cog.listener(name="on_command_completion")
async def add_cmd_used_count_user_profile(self, ctx: commands.Context):
user_profile = await self.client.get_user_profile_(ctx.author.id)
user_profile.update({"cmds_used": user_profile['cmds_used'] + 1})
@commands.Cog.listener()
async def on_command(self, ctx: commands.Context):
if ctx.author.id in COOLDOWN_BYPASS:
ctx.command.reset_cooldown(ctx)
if ctx.author.id in OWNERS:
return
embed = success_embed(
"Ah yes",
"Some kid used me"
).add_field(name="Command:", value=f"```{ctx.message.content}```", inline=False
).add_field(name="User:", value=f"{ctx.author.mention}```{ctx.author}\n{ctx.author.id}```", inline=False
).add_field(name="Server:", value=f"```{ctx.guild}\n{ctx.guild.id}```", inline=False
).add_field(name="Channel:", value=f"{ctx.channel.mention}```{ctx.channel}\n{ctx.channel.id}```", inline=False)
webhooks = self.client.get_cog("Webhooks").webhooks
webhook = webhooks.get("cmd_uses")
await webhook.send(embed=embed)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
if str(message.channel.type) != 'private':
return
files = [await e.to_file() for e in message.attachments]
embed = success_embed("New DM!", message.system_content).add_field(
name="Kid:",
value=f"{message.author.mention}```{message.author}\n{message.author.id}```",
inline=False
).set_footer(text=f"Message ID: {message.id}").set_author(name=message.author, icon_url=message.author.display_avatar.url)
for sticker in message.stickers:
embed.add_field(
name="Sticker:",
value=f"[{sticker.name} (ID: {sticker.id})]({sticker.url})"
)
if len(message.stickers) == 1:
embed.set_image(url=message.stickers[0].url)
await self.client.get_channel(793482521076695070).send(embed=embed, files=files)
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
await self.client.get_guild_config(guild.id)
embed = success_embed(
f"{EMOJIS['add']} Mikuni Added",
f"""
**Server:** ```{guild} ({guild.id})```
**Owner:** {guild.owner.mention}```{guild.owner} ({guild.owner_id})```
**Members:** {guild.member_count}
**Humans:** {len(list(filter(lambda m: not m.bot, guild.members)))}
**Bots:** {len(list(filter(lambda m: m.bot, guild.members)))}
"""
).set_author(name=guild.owner, icon_url=guild.owner.display_avatar.url)
if guild.icon is not None:
embed.set_thumbnail(url=guild.icon.url)
try:
webhook = self.client.get_cog("Webhooks").webhooks.get("add_remove")
await webhook.send(embed=embed)
except Exception:
pass
send_embed = discord.Embed(
title=f"{EMOJIS['wave_1']} Hi, UwU!~",
description=f"""
Thank you very much for inviting me, I love you!~
My prefix is `{PREFIX}`, but you can change it to whatever you want!
Let me tell you more about me!
~ I am a simple, multipurpose bot designed to make your Discord life simpler.
~ I have a lot of amazing modules that you can discover by using the command `{PREFIX}help`
~ I leave the rest for you to discover... <:hehe:866211987716833300>
I hope you have a fun time with me, UwU!~
""",
color=MAIN_COLOR
).set_thumbnail(url=self.client.user.display_avatar.url
).set_author(name=self.client.user.name, icon_url=self.client.user.display_avatar.url
).add_field(name=EMPTY_CHARACTER, value=f"[Invite Mikuni]({WEBSITE_LINK}/invite) | [Vote Mikuni]({WEBSITE_LINK}/vote) | [Support Server]({SUPPORT_SERVER_LINK})", inline=False)
for channel in guild.channels:
if "general" in channel.name:
try:
return await channel.send(embed=send_embed)
except Exception:
pass
for channel in guild.channels:
if "bot" in channel.name or "cmd" in channel.name or "command" in channel.name:
try:
return await channel.send(embed=send_embed)
except Exception:
pass
for channel in guild.channels:
try:
return await channel.send(embed=send_embed)
except Exception:
pass
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild):
embed = error_embed(
f"{EMOJIS['remove']} Mikuni Removed",
f"""
**Server:** ```{guild} ({guild.id})```
**Owner:** {guild.owner.mention}```{guild.owner} ({guild.owner_id})```
**Members:** {guild.member_count}
**Humans:** {len(list(filter(lambda m: not m.bot, guild.members)))}
**Bots:** {len(list(filter(lambda m: m.bot, guild.members)))}
"""
).set_author(name=guild.owner, icon_url=guild.owner.display_avatar.url)
if guild.icon is not None:
embed.set_thumbnail(url=guild.icon.url)
for e in self.client.serverconfig_cache:
if e['_id'] == guild.id:
self.client.serverconfig_cache.remove(e)
await self.client.serverconfig.delete_one({"_id": guild.id})
break
webhook = self.client.get_cog("Webhooks").webhooks.get("add_remove")
await webhook.send(embed=embed)
def setup(client):
client.add_cog(Logs(client))
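# Illustrative (an assumption based on the file location): discord.py would
# load this cog with client.load_extension("cogs_hidden.logs"), which invokes
# the setup() entry point above.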
| 40.674847 | 183 | 0.633635 |
917a3acde58cbb601e3fe33318443144feb9ed4d | 7,998 | py | Python | cryptoapis/model/uri_not_found.py | xan187/Crypto_APIs_2.0_SDK_Python | a56c75df54ef037b39be1315ed6e54de35bed55b | ["MIT"] | null | null | null | cryptoapis/model/uri_not_found.py | xan187/Crypto_APIs_2.0_SDK_Python | a56c75df54ef037b39be1315ed6e54de35bed55b | ["MIT"] | null | null | null | cryptoapis/model/uri_not_found.py | xan187/Crypto_APIs_2.0_SDK_Python | a56c75df54ef037b39be1315ed6e54de35bed55b | ["MIT"] | 1 | 2021-07-21T03:35:18.000Z | 2021-07-21T03:35:18.000Z |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from cryptoapis.model.uri_not_found_error import UriNotFoundError
globals()['UriNotFoundError'] = UriNotFoundError
class UriNotFound(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'request_id': (str,), # noqa: E501
'error': (UriNotFoundError,), # noqa: E501
'context': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'apiVersion', # noqa: E501
'request_id': 'requestId', # noqa: E501
'error': 'error', # noqa: E501
'context': 'context', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, api_version, request_id, error, *args, **kwargs): # noqa: E501
"""UriNotFound - a model defined in OpenAPI
Args:
api_version (str): Specifies the version of the API that incorporates this endpoint.
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
error (UriNotFoundError):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user. [optional]  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.api_version = api_version
self.request_id = request_id
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
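# Illustrative construction of this generated model (all argument values are
# placeholders, not taken from the API):
#     UriNotFound(api_version="2.0.0", request_id="<request-id>",
#                 error=UriNotFoundError(...), context="<optional-context>")
# Unknown keyword arguments are either stored or discarded according to the
# configuration flags described in the __init__ docstring above.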
| 42.770053 | 484 | 0.606902 |
b24b28731c61f272f73e2bd079ce13afff17bb1e | 132,649 | py | Python | Lib/test/pickletester.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 1,886 | 2021-05-03T23:58:43.000Z | 2022-03-31T19:15:58.000Z | Lib/test/pickletester.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 70 | 2021-05-04T23:25:35.000Z | 2022-03-31T18:42:08.000Z | Lib/test/pickletester.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 52 | 2021-05-04T21:26:03.000Z | 2022-03-08T18:02:56.000Z |
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
save_restore_warnings_filters
)
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
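# A minimal sketch of that outer loop (illustrative helper, not used by the
# tests themselves): round-trip an object under every supported protocol so a
# caller can compare the copies against the original.
def _example_roundtrip_all_protocols(obj):
    # Dump and reload `obj` once per protocol in `protocols`.
    return [pickle.loads(pickle.dumps(obj, proto)) for proto in protocols]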
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
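# Illustrative use of the two helpers above (not part of the original suite):
# query a protocol-4 pickle for its FRAME and MEMOIZE opcodes.
def _example_opcode_queries(payload=b"hello"):
    # Returns (framing used?, number of memoize opcodes) for a pickle of `payload`.
    data = pickle.dumps(payload, 4)
    return opcode_in_pickle(pickle.FRAME, data), count_opcode(pickle.MEMOIZE, data)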
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
# Hashable mutable key
class K(object):
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
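# Sketch of how the protocol-5 branch of __reduce_ex__ above is exercised
# (an illustration, not part of the test data): with protocol 5 the
# PickleBuffer can be routed out-of-band through buffer_callback.
def _example_out_of_band_dump(obj):
    # Collect out-of-band buffers instead of embedding them in the stream;
    # the matching load is pickle.loads(data, buffers=buffers).
    buffers = []
    data = pickle.dumps(obj, protocol=5, buffer_callback=buffers.append)
    return data, buffers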
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
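# Illustrative relationship: DATA0..DATA4 are the expected pickles of
# create_data() under protocols 0..4 (the module names __builtin__/copy_reg in
# DATA0..DATA2 show they were produced by Python 2, so re-dumping under
# Python 3 will not reproduce those byte-for-byte). A rough sketch:
def _example_regenerate_reference_pickles():
    obj = create_data()
    return [pickle.dumps(obj, proto) for proto in range(5)]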
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
        # contain an instance of an old-style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Try too with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(KeyError, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_reduce(self):
self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
self.check_unpickling_error(TypeError, b'N)R.')
self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
def test_bad_newobj(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
def test_bad_newobj_ex(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
        # (an artificial delay could be inserted here)
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyFrozenSet)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
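            # (LONG1 stores its byte count in a single byte, so integers whose
            # two's-complement encoding needs 256 bytes or more fall back to
            # LONG4, which uses a 4-byte count.)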
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
            # assert_is_copy is very expensive here as it precomputes
            # a failure message by computing the repr() of n and got,
            # so we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
        # make sure that floats are formatted locale-independently with proto 0
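        # (protocol 0 writes the float in text form after the FLOAT opcode
        # 'F', so a locale using ',' as the decimal separator would otherwise
        # leak into the stream)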
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
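                # Protocol 2+ pickles must start with the PROTO opcode
                # (b'\x80') followed by a one-byte protocol number.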
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
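        # Hand-craft a pickle that claims a protocol number higher than any
        # supported one and check that loading it fails cleanly.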
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
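        # Protocols above 3 introduce no new tuple opcodes, which is why the
        # lookup below clamps the protocol with min(proto, 3).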
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
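        # As with tuples, protocols above 3 reuse the protocol-3 opcodes for
        # these singletons, hence the min(proto, 3) clamp below.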
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
    # Register a type with copyreg, with extension code extcode. Pickle
    # an object of that type. Check that the resulting pickle uses the
    # matching opcode (EXT1, EXT2 or EXT4) under proto 2, but not under proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
@unittest.skipUnderCinderJIT("JIT doesn't perform recursion checks")
@unittest.skipIfDebug("C stack overflow")
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
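        # The three shallow copies share their key strings and value lists,
        # so pickling obj exercises a large number of memo PUT/GET references.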
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
        # Test compactness of the int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
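    # With protocol 4 and higher the pickler groups consecutive small items
    # into frames of roughly FRAME_SIZE_TARGET bytes, each introduced by a
    # FRAME opcode carrying an 8-byte frame length; the tests below depend
    # on that layout.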
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
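                # The FRAME opcode is 1 byte followed by an 8-byte
                # little-endian length argument, hence the 9-byte offset.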
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame is expected for the small objects between
                    # the first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
        # Large byte objects (dict values) interleaved with small objects
        # (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
            # The C pickler issues one call to write per frame (header and
            # contents) while the Python pickler issues two calls to write: one
            # for the frame header and one for the frame's binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
            # Actually read the binary content of the chunks after the end
            # of the call to dump: any memoryview passed to write should not
            # be released, otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
            # but not too many either: one per-frame header, one per frame for
            # the actual contents, plus a few extra small writes (the protocol
            # header among them), hence the slack of 3 in the bound below.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when looking up
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
        # Yield buffer-like objects with the bytestring "abcdefgh" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
                    # Returning a true value from buffer_callback should have
                    # the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
            # Non-iterable buffers argument
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
            # Buffers iterable is exhausted too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
    # All protocols use 1 byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
            self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
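# --- Illustrative sketch (added by the editor, not part of the original test
# suite). The out-of-band buffer tests above exercise PEP 574: with protocol 5
# a pickler can hand large contiguous buffers to a buffer_callback instead of
# embedding them in the pickle stream, and the unpickler receives them back
# through the ``buffers`` argument. A minimal round trip with the stdlib API:
def _oob_buffer_demo():
    import pickle
    payload = bytearray(b"x" * 1024)
    buffers = []
    data = pickle.dumps(pickle.PickleBuffer(payload), protocol=5,
                        buffer_callback=buffers.append)
    # Without the buffers argument this load would fail; with it, the payload
    # round-trips without ever being copied into the pickle stream.
    restored = pickle.loads(data, buffers=buffers)
    assert bytes(restored) == bytes(payload)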
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
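# --- Illustrative sketch (added by the editor, not part of the original test
# suite). In a concrete setting the persistent_id()/persistent_load() hooks
# used by the abstract tests above are typically attached by subclassing
# Pickler and Unpickler, e.g. to replace selected objects with external
# references. The ``registry`` below is a made-up stand-in for a database.
def _persistent_id_demo():
    import io
    import pickle
    registry = {"obj-1": object()}
    class RegistryPickler(pickle.Pickler):
        def persistent_id(self, obj):
            # Return a persistent id for registered objects, None otherwise.
            for key, value in registry.items():
                if value is obj:
                    return key
            return None
    class RegistryUnpickler(pickle.Unpickler):
        def persistent_load(self, pid):
            return registry[pid]
    buf = io.BytesIO()
    RegistryPickler(buf).dump(["payload", registry["obj-1"]])
    restored = RegistryUnpickler(io.BytesIO(buf.getvalue())).load()
    assert restored[1] is registry["obj-1"]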
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
    Such a callable may be created by someone other than the author of the
    BBB class. If passed as the state_setter item of a custom reducer, this
    allows for custom state-setting behavior of BBB objects. One can think of
    it as the analogue of list_setitems or dict_setitems, but for foreign
    classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
            # In this case the callback returns an invalid result (not a
            # 2-to-6-item tuple or a string), so the pickler should raise a
            # proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
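# --- Illustrative sketch (added by the editor, not part of the original test
# suite). reducer_override() (Python 3.8+) is consulted before the normal
# global/type dispatch, and returning NotImplemented falls back to the default
# behaviour. A concrete subclass mirroring the hook above:
def _reducer_override_demo():
    import io
    import pickle
    class NameOnlyPickler(pickle.Pickler):
        def reducer_override(self, obj):
            if isinstance(obj, type) and obj.__name__ == "LocalClass":
                # Pickle this otherwise-unpicklable local class as its name.
                return str, (obj.__name__,)
            return NotImplemented
    class LocalClass:
        pass
    buf = io.BytesIO()
    NameOnlyPickler(buf, protocol=2).dump(LocalClass)
    assert pickle.loads(buf.getvalue()) == "LocalClass"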
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
                    # math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
        # inside the Pickler object, which could prevent all serialized objects
        # from being garbage-collected without explicitly invoking gc.collect.
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
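# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Outside the tests, a per-pickler dispatch_table is the documented
# way to customize reduction for selected classes without touching copyreg's
# process-wide table: only the pickler carrying the table is affected.
def _dispatch_table_demo():
    import io
    import pickle
    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y
    def reduce_point(pt):
        # Reduce Point instances to a plain (x, y) tuple.
        return tuple, ((pt.x, pt.y),)
    buf = io.BytesIO()
    pickler = pickle.Pickler(buf)
    pickler.dispatch_table = {Point: reduce_point}
    pickler.dump(Point(1, 2))
    assert pickle.loads(buf.getvalue()) == (1, 2)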
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
avg_line_length: 35.928765 | max_line_length: 89 | alphanum_fraction: 0.535737

hexsha: 491ee4a702cd033e771b2ef59f19c85a19e80d1f | size: 1,139 | ext: py | lang: Python
max_stars:  path=openwisp_utils/admin_theme/email.py  repo=totallynotvaishnav/openwisp-utils  head=e8902a1bf5adea30167ce319be52927427cdbd5e  licenses=["BSD-3-Clause"]  count=null  event_min=null  event_max=null
max_issues: path=openwisp_utils/admin_theme/email.py  repo=totallynotvaishnav/openwisp-utils  head=e8902a1bf5adea30167ce319be52927427cdbd5e  licenses=["BSD-3-Clause"]  count=1  event_min=2022-01-25T17:46:52.000Z  event_max=2022-01-25T17:46:52.000Z
max_forks:  path=openwisp_utils/admin_theme/email.py  repo=Aryamanz29/openwisp-utils  head=c5558fc14a22316e49e9144d39216de348e19b05  licenses=["BSD-3-Clause"]  count=null  event_min=null  event_max=null
import logging
from smtplib import SMTPRecipientsRefused
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from . import settings as app_settings
logger = logging.getLogger(__name__)
def send_email(subject, body_text, body_html, recipients, extra_context={}):
mail = EmailMultiAlternatives(
subject=subject,
body=strip_tags(body_text),
from_email=settings.DEFAULT_FROM_EMAIL,
to=recipients,
)
if app_settings.OPENWISP_HTML_EMAIL and body_html:
context = dict(
subject=subject,
message=body_html,
logo_url=app_settings.OPENWISP_EMAIL_LOGO,
)
context.update(extra_context)
html_message = render_to_string(
app_settings.OPENWISP_EMAIL_TEMPLATE,
context=context,
)
mail.attach_alternative(html_message, 'text/html')
try:
mail.send()
except SMTPRecipientsRefused as err:
logger.warning(f'SMTP recipients refused: {err.recipients}')
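# --- Illustrative usage sketch (added by the editor, not part of the original
# module). Assuming Django is configured with a working email backend and the
# OPENWISP_* settings referenced above, a caller would invoke send_email()
# roughly like this (the recipient address and the 'call_to_action' key are
# hypothetical examples, not values defined by this module):
#
#     send_email(
#         subject='Device registered',
#         body_text='A new device was registered.',
#         body_html='<p>A new device was <b>registered</b>.</p>',
#         recipients=['admin@example.com'],
#         extra_context={'call_to_action': 'View device'},
#     )
#
# Note that SMTPRecipientsRefused is caught and logged, so callers do not have
# to guard against refused recipient addresses themselves.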
avg_line_length: 28.475 | max_line_length: 76 | alphanum_fraction: 0.703248

hexsha: 944f8a5196086da000f1b5a9686251b886d4a831 | size: 1,442 | ext: py | lang: Python
max_stars:  path=bootstrapvz/base/fs/partitionmaps/none.py  repo=wrigri/bootstrap-vz  head=52b5846cb5acd5551fd818b9e84f42683455a3e5  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_issues: path=bootstrapvz/base/fs/partitionmaps/none.py  repo=wrigri/bootstrap-vz  head=52b5846cb5acd5551fd818b9e84f42683455a3e5  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_forks:  path=bootstrapvz/base/fs/partitionmaps/none.py  repo=wrigri/bootstrap-vz  head=52b5846cb5acd5551fd818b9e84f42683455a3e5  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
from ..partitions.single import SinglePartition
class NoPartitions(object):
"""Represents a virtual 'NoPartitions' partitionmap.
This virtual partition map exists because it is easier for tasks to
simply always deal with partition maps and then let the base abstract that away.
"""
def __init__(self, data, sector_size, bootloader):
"""
:param dict data: volume.partitions part of the manifest
        :param int sector_size: Sector size of the volume
:param str bootloader: Name of the bootloader we will use for bootstrapping
"""
from bootstrapvz.common.sectors import Sectors
# In the NoPartitions partitions map we only have a single 'partition'
self.root = SinglePartition(Sectors(data['root']['size'], sector_size),
data['root']['filesystem'], data['root'].get('format_command', None))
self.partitions = [self.root]
def is_blocking(self):
"""Returns whether the partition map is blocking volume detach operations
:rtype: bool
"""
return self.root.fsm.current == 'mounted'
def get_total_size(self):
"""Returns the total size the partitions occupy
:return: The size of all the partitions
:rtype: Sectors
"""
return self.root.get_end()
def __getstate__(self):
state = self.__dict__.copy()
state['__class__'] = self.__module__ + '.' + self.__class__.__name__
return state
def __setstate__(self, state):
for key in state:
self.__dict__[key] = state[key]
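# --- Illustrative usage sketch (added by the editor, not part of the original
# module). A NoPartitions map is built from the ``volume.partitions`` section
# of a manifest; the concrete values below (size string, filesystem, sector
# size, bootloader name) are made-up examples rather than bootstrap-vz
# defaults:
#
#     data = {'root': {'size': '4GiB', 'filesystem': 'ext4'}}
#     partition_map = NoPartitions(data, sector_size=512, bootloader='grub')
#     partition_map.get_total_size()   # Sectors object covering the root size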
avg_line_length: 31.347826 | max_line_length: 99 | alphanum_fraction: 0.71914

hexsha: e0b4d28958563f1cccfa618a0903713af58a1abe | size: 3,412 | ext: py | lang: Python
max_stars:  path=python/cuml/test/test_incremental_pca.py  repo=ptartan21/cuml  head=4ce9b4181d47996a39406322d0e2009bd83db211  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_issues: path=python/cuml/test/test_incremental_pca.py  repo=ptartan21/cuml  head=4ce9b4181d47996a39406322d0e2009bd83db211  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_forks:  path=python/cuml/test/test_incremental_pca.py  repo=ptartan21/cuml  head=4ce9b4181d47996a39406322d0e2009bd83db211  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cupy as cp
import cupyx
from sklearn.decomposition import IncrementalPCA as skIPCA
from cuml.datasets import make_blobs
from cuml.experimental.decomposition import IncrementalPCA as cuIPCA
from cuml.test.utils import array_equal
@pytest.mark.parametrize(
'nrows, ncols, n_components, sparse_input, density, sparse_format,'
' batch_size_divider', [
(500, 15, 2, True, 0.4, 'csr', 5),
(5000, 25, 12, False, 0.07, 'csc', 10),
(5000, 15, None, True, 0.4, 'csc', 5),
(500, 25, 2, False, 0.07, 'csr', 10),
(5000, 25, 12, False, 0.07, 'csr', 10)
]
)
@pytest.mark.no_bad_cuml_array_check
def test_fit(nrows, ncols, n_components, sparse_input, density,
sparse_format, batch_size_divider):
if sparse_format == 'csc':
pytest.skip("cupyx.scipy.sparse.csc.csc_matrix does not support"
" indexing as of cupy 7.6.0")
if sparse_input:
X = cupyx.scipy.sparse.random(nrows, ncols, density=density,
random_state=10, format=sparse_format)
else:
X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
cu_ipca = cuIPCA(n_components=n_components,
batch_size=int(nrows / batch_size_divider))
cu_ipca.fit(X)
cu_t = cu_ipca.transform(X)
cu_inv = cu_ipca.inverse_transform(cu_t)
sk_ipca = skIPCA(n_components=n_components,
batch_size=int(nrows / batch_size_divider))
if sparse_input:
X = X.get()
else:
X = cp.asnumpy(X)
sk_ipca.fit(X)
sk_t = sk_ipca.transform(X)
sk_inv = sk_ipca.inverse_transform(sk_t)
assert array_equal(cu_inv, sk_inv,
5e-5, with_sign=True)
@pytest.mark.parametrize(
'nrows, ncols, n_components, density, batch_size_divider', [
(500, 15, 2, 0.07, 5),
(5000, 25, 12, 0.07, 10),
(5000, 15, 2, 0.4, 5),
(500, 25, 12, 0.4, 10),
]
)
@pytest.mark.no_bad_cuml_array_check
def test_partial_fit(nrows, ncols, n_components, density,
batch_size_divider):
X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
cu_ipca = cuIPCA(n_components=n_components)
sample_size = int(nrows / batch_size_divider)
for i in range(0, nrows, sample_size):
cu_ipca.partial_fit(X[i:i + sample_size].copy())
cu_t = cu_ipca.transform(X)
cu_inv = cu_ipca.inverse_transform(cu_t)
sk_ipca = skIPCA(n_components=n_components)
X = cp.asnumpy(X)
for i in range(0, nrows, sample_size):
sk_ipca.partial_fit(X[i:i + sample_size].copy())
sk_t = sk_ipca.transform(X)
sk_inv = sk_ipca.inverse_transform(sk_t)
assert array_equal(cu_inv, sk_inv,
5e-5, with_sign=True)
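# --- Illustrative sketch (added by the editor, not part of the original
# tests). The partial_fit loop above is the generic incremental-PCA pattern:
# feed the estimator one batch at a time so the whole dataset never has to fit
# in (GPU) memory at once. The same pattern with the scikit-learn reference
# estimator, runnable on CPU without cuML:
def _incremental_pca_cpu_demo(n_samples=1000, n_features=20, batch_size=100):
    import numpy as np
    from sklearn.decomposition import IncrementalPCA
    rng = np.random.RandomState(0)
    X = rng.standard_normal((n_samples, n_features))
    ipca = IncrementalPCA(n_components=5)
    for start in range(0, n_samples, batch_size):
        ipca.partial_fit(X[start:start + batch_size])
    return ipca.transform(X)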
avg_line_length: 31.592593 | max_line_length: 77 | alphanum_fraction: 0.656506

hexsha: d6c7d96356b39542086e801a542fa98187043e4d | size: 5,530 | ext: py | lang: Python
max_stars:  path=tungsten_tempest_plugin/tests/api/contrail/test_database.py  repo=hemcreddy/tungstenfabric-tungsten-tempest  head=ce646a563c291dfdd0e57a7f0f11d5ca818e3143  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_issues: path=tungsten_tempest_plugin/tests/api/contrail/test_database.py  repo=hemcreddy/tungstenfabric-tungsten-tempest  head=ce646a563c291dfdd0e57a7f0f11d5ca818e3143  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
max_forks:  path=tungsten_tempest_plugin/tests/api/contrail/test_database.py  repo=hemcreddy/tungstenfabric-tungsten-tempest  head=ce646a563c291dfdd0e57a7f0f11d5ca818e3143  licenses=["Apache-2.0"]  count=null  event_min=null  event_max=null
# Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tempest test-case to test database objects using RBAC roles
"""
from oslo_log import log as logging
from patrole_tempest_plugin import rbac_rule_validation
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tungsten_tempest_plugin.tests.api.contrail import rbac_base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ContrailDatabaseTest(rbac_base.BaseContrailTest):
"""Test class to test database objects using RBAC roles"""
def _create_global_system_config(self):
config_name = data_utils.rand_name('test-config')
parent_type = 'config-root'
config_fq_name = [config_name]
new_config = \
self.config_client.create_global_system_configs(
parent_type=parent_type,
display_name=config_name,
fq_name=config_fq_name)['global-system-config']
self.addCleanup(self._try_delete_resource,
(self.config_client.
delete_global_system_config),
new_config['uuid'])
return new_config
def _delete_database_node(self, db_node_id):
return self.db_client.delete_database_node(db_node_id)
def _create_database_node(self, global_system_config):
name = data_utils.rand_name('database')
fq_name = [global_system_config, name]
database_node_ip_address = '1.1.1.1'
parent_type = 'global-system-config'
db_node = self.db_client.create_databse_nodes(
display_name=name,
database_node_ip_address=database_node_ip_address,
fq_name=fq_name,
parent_type=parent_type)['database-node']
self.addCleanup(self._try_delete_resource,
self._delete_database_node,
db_node['uuid'])
return db_node
@rbac_rule_validation.action(service="Contrail",
rules=["list_database_nodes"])
@decorators.idempotent_id('5ae6f965-6161-443f-b19e-dfa7b364c533')
def test_list_database_nodes(self):
"""test method for list database objects"""
# Create global system config
global_system_config = self._create_global_system_config()['name']
self._create_database_node(global_system_config)
with self.rbac_utils.override_role(self):
self.db_client.list_database_nodes()
@rbac_rule_validation.action(service="Contrail",
rules=["show_database_node"])
@decorators.idempotent_id('4a07d9a8-7b99-43bd-b628-06c023993aab')
def test_show_database_node(self):
"""test method for show database objects"""
# Create global system config
global_system_config = self._create_global_system_config()['name']
db_node = self._create_database_node(global_system_config)
db_node_id = db_node['uuid']
with self.rbac_utils.override_role(self):
self.db_client.show_database_node(db_node_id)
@rbac_rule_validation.action(service="Contrail",
rules=["create_database_nodes"])
@decorators.idempotent_id('b9aa9c6b-9381-44f0-94fb-e4523bf2a87e')
def test_create_database_nodes(self):
"""test method for update database objects"""
# Create global system config
global_system_config = self._create_global_system_config()['name']
with self.rbac_utils.override_role(self):
self._create_database_node(global_system_config)
@rbac_rule_validation.action(service="Contrail",
rules=["update_database_node"])
@decorators.idempotent_id('6e59f393-0e55-4327-871e-7f0ad53f2e17')
def test_update_database_node(self):
"""test method for update database objects"""
# Create global system config
global_system_config = self._create_global_system_config()['name']
db_node = self._create_database_node(global_system_config)
db_node_id = db_node['uuid']
display_name = data_utils.rand_name('DatabaseNew')
with self.rbac_utils.override_role(self):
self.db_client.update_database_node(
db_node_id=db_node_id,
display_name=display_name)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_database_node"])
@decorators.idempotent_id('0cbc5a52-d7e7-4a1c-a85d-6bf44012d99b')
def test_delete_database_node(self):
"""test method for delete database objects"""
# Create global system config
global_system_config = self._create_global_system_config()['name']
db_node = self._create_database_node(global_system_config)
db_node_id = db_node['uuid']
with self.rbac_utils.override_role(self):
self._delete_database_node(db_node_id)
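# --- Illustrative sketch (added by the editor, not part of the original
# tests). Every test above follows the same Patrole RBAC pattern: create any
# prerequisite resources with the default (admin) credentials, then switch to
# the role under test only for the single API call being validated. The names
# below are placeholders, not real client methods:
#
#     @rbac_rule_validation.action(service="Contrail", rules=["<policy rule>"])
#     @decorators.idempotent_id('<uuid4>')
#     def test_some_action(self):
#         resource = self._create_prerequisite()        # admin credentials
#         with self.rbac_utils.override_role(self):      # role under test
#             self.some_client.some_action(resource['uuid'])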
avg_line_length: 43.203125 | max_line_length: 78 | alphanum_fraction: 0.68264

hexsha: 72fa261f69b79322b3c73fcb199f203549225812 | size: 72,506 | ext: py | lang: Python
max_stars:  path=state_moves.py  repo=Orangeplumber/Chess-AI  head=d5baff14a1df6c01b2ca71970cf5775a0723392f  licenses=["MIT"]  count=1  event_min=2021-07-24T18:35:19.000Z  event_max=2021-07-24T18:35:19.000Z
max_issues: path=training/state_moves.py  repo=Orangeplumber/Chess-AI  head=d5baff14a1df6c01b2ca71970cf5775a0723392f  licenses=["MIT"]  count=null  event_min=null  event_max=null
max_forks:  path=training/state_moves.py  repo=Orangeplumber/Chess-AI  head=d5baff14a1df6c01b2ca71970cf5775a0723392f  licenses=["MIT"]  count=null  event_min=null  event_max=null
MOVES={'p7': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'b1': [[[9, 18, 27, 36, 45, 54, 63]], [[8], [10, 19, 28, 37, 46, 55]], [[9, 16], [11, 20, 29, 38, 47]], [[10, 17, 24], [12, 21, 30, 39]], [[11, 18, 25, 32], [13, 22, 31]], [[12, 19, 26, 33, 40], [14, 23]], [[13, 20, 27, 34, 41, 48], [15]], [[14, 21, 28, 35, 42, 49, 56]], [[1], [17, 26, 35, 44, 53, 62]], [[2], [0], [16], [18, 27, 36, 45, 54, 63]], [[3], [1], [17, 24], [19, 28, 37, 46, 55]], [[4], [2], [18, 25, 32], [20, 29, 38, 47]], [[5], [3], [19, 26, 33, 40], [21, 30, 39]], [[6], [4], [20, 27, 34, 41, 48], [22, 31]], [[7], [5], [21, 28, 35, 42, 49, 56], [23]], [[6], [22, 29, 36, 43, 50, 57]], [[9, 2], [25, 34, 43, 52, 61]], [[10, 3], [8], [24], [26, 35, 44, 53, 62]], [[11, 4], [9, 0], [25, 32], [27, 36, 45, 54, 63]], [[12, 5], [10, 1], [26, 33, 40], [28, 37, 46, 55]], [[13, 6], [11, 2], [27, 34, 41, 48], [29, 38, 47]], [[14, 7], [12, 3], [28, 35, 42, 49, 56], [30, 39]], [[15], [13, 4], [29, 36, 43, 50, 57], [31]], [[14, 5], [30, 37, 44, 51, 58]], [[17, 10, 3], [33, 42, 51, 60]], [[18, 11, 4], [16], [32], [34, 43, 52, 61]], [[19, 12, 5], [17, 8], [33, 40], [35, 44, 53, 62]], [[20, 13, 6], [18, 9, 0], [34, 41, 48], [36, 45, 54, 63]], [[21, 14, 7], [19, 10, 1], [35, 42, 49, 56], [37, 46, 55]], [[22, 15], [20, 11, 2], [36, 43, 50, 57], [38, 47]], [[23], [21, 12, 3], [37, 44, 51, 58], [39]], [[22, 13, 4], [38, 45, 52, 59]], [[25, 18, 11, 4], [41, 50, 59]], [[26, 19, 12, 5], [24], [40], [42, 51, 60]], [[27, 20, 13, 6], [25, 16], [41, 48], [43, 52, 61]], [[28, 21, 14, 7], [26, 17, 8], [42, 49, 56], [44, 53, 62]], [[29, 22, 15], [27, 18, 9, 0], [43, 50, 57], [45, 54, 63]], [[30, 23], [28, 19, 10, 1], [44, 51, 58], [46, 55]], [[31], [29, 20, 11, 2], [45, 52, 59], [47]], [[30, 21, 12, 3], [46, 53, 60]], [[33, 26, 19, 12, 5], [49, 58]], [[34, 27, 20, 13, 6], [32], [48], [50, 59]], [[35, 28, 21, 14, 7], [33, 24], [49, 56], [51, 60]], [[36, 29, 22, 15], [34, 25, 16], [50, 57], [52, 61]], [[37, 30, 23], [35, 26, 17, 8], [51, 58], [53, 62]], [[38, 31], [36, 27, 18, 9, 0], [52, 59], [54, 63]], [[39], [37, 28, 19, 10, 1], [53, 60], [55]], [[38, 29, 20, 11, 2], [54, 61]], [[41, 34, 27, 20, 13, 6], [57]], [[42, 35, 28, 21, 14, 7], [40], [56], [58]], [[43, 36, 29, 22, 15], [41, 32], [57], [59]], [[44, 37, 30, 23], [42, 33, 24], [58], [60]], [[45, 38, 31], [43, 34, 25, 16], [59], [61]], [[46, 39], [44, 35, 26, 17, 8], [60], [62]], [[47], [45, 36, 27, 18, 9, 0], [61], [63]], [[46, 37, 28, 19, 10, 1], [62]], [[49, 42, 35, 28, 21, 14, 7]], [[50, 43, 36, 29, 22, 15], [48]], [[51, 44, 37, 30, 23], [49, 40]], [[52, 45, 38, 31], [50, 
41, 32]], [[53, 46, 39], [51, 42, 33, 24]], [[54, 47], [52, 43, 34, 25, 16]], [[55], [53, 44, 35, 26, 17, 8]], [[54, 45, 36, 27, 18, 9, 0]]], 'b2': [[[9, 18, 27, 36, 45, 54, 63]], [[8], [10, 19, 28, 37, 46, 55]], [[9, 16], [11, 20, 29, 38, 47]], [[10, 17, 24], [12, 21, 30, 39]], [[11, 18, 25, 32], [13, 22, 31]], [[12, 19, 26, 33, 40], [14, 23]], [[13, 20, 27, 34, 41, 48], [15]], [[14, 21, 28, 35, 42, 49, 56]], [[1], [17, 26, 35, 44, 53, 62]], [[2], [0], [16], [18, 27, 36, 45, 54, 63]], [[3], [1], [17, 24], [19, 28, 37, 46, 55]], [[4], [2], [18, 25, 32], [20, 29, 38, 47]], [[5], [3], [19, 26, 33, 40], [21, 30, 39]], [[6], [4], [20, 27, 34, 41, 48], [22, 31]], [[7], [5], [21, 28, 35, 42, 49, 56], [23]], [[6], [22, 29, 36, 43, 50, 57]], [[9, 2], [25, 34, 43, 52, 61]], [[10, 3], [8], [24], [26, 35, 44, 53, 62]], [[11, 4], [9, 0], [25, 32], [27, 36, 45, 54, 63]], [[12, 5], [10, 1], [26, 33, 40], [28, 37, 46, 55]], [[13, 6], [11, 2], [27, 34, 41, 48], [29, 38, 47]], [[14, 7], [12, 3], [28, 35, 42, 49, 56], [30, 39]], [[15], [13, 4], [29, 36, 43, 50, 57], [31]], [[14, 5], [30, 37, 44, 51, 58]], [[17, 10, 3], [33, 42, 51, 60]], [[18, 11, 4], [16], [32], [34, 43, 52, 61]], [[19, 12, 5], [17, 8], [33, 40], [35, 44, 53, 62]], [[20, 13, 6], [18, 9, 0], [34, 41, 48], [36, 45, 54, 63]], [[21, 14, 7], [19, 10, 1], [35, 42, 49, 56], [37, 46, 55]], [[22, 15], [20, 11, 2], [36, 43, 50, 57], [38, 47]], [[23], [21, 12, 3], [37, 44, 51, 58], [39]], [[22, 13, 4], [38, 45, 52, 59]], [[25, 18, 11, 4], [41, 50, 59]], [[26, 19, 12, 5], [24], [40], [42, 51, 60]], [[27, 20, 13, 6], [25, 16], [41, 48], [43, 52, 61]], [[28, 21, 14, 7], [26, 17, 8], [42, 49, 56], [44, 53, 62]], [[29, 22, 15], [27, 18, 9, 0], [43, 50, 57], [45, 54, 63]], [[30, 23], [28, 19, 10, 1], [44, 51, 58], [46, 55]], [[31], [29, 20, 11, 2], [45, 52, 59], [47]], [[30, 21, 12, 3], [46, 53, 60]], [[33, 26, 19, 12, 5], [49, 58]], [[34, 27, 20, 13, 6], [32], [48], [50, 59]], [[35, 28, 21, 14, 7], [33, 24], [49, 56], [51, 60]], [[36, 29, 22, 15], [34, 25, 16], [50, 57], [52, 61]], [[37, 30, 23], [35, 26, 17, 8], [51, 58], [53, 62]], [[38, 31], [36, 27, 18, 9, 0], [52, 59], [54, 63]], [[39], [37, 28, 19, 10, 1], [53, 60], [55]], [[38, 29, 20, 11, 2], [54, 61]], [[41, 34, 27, 20, 13, 6], [57]], [[42, 35, 28, 21, 14, 7], [40], [56], [58]], [[43, 36, 29, 22, 15], [41, 32], [57], [59]], [[44, 37, 30, 23], [42, 33, 24], [58], [60]], [[45, 38, 31], [43, 34, 25, 16], [59], [61]], [[46, 39], [44, 35, 26, 17, 8], [60], [62]], [[47], [45, 36, 27, 18, 9, 0], [61], [63]], [[46, 37, 28, 19, 10, 1], [62]], [[49, 42, 35, 28, 21, 14, 7]], [[50, 43, 36, 29, 22, 15], [48]], [[51, 44, 37, 30, 23], [49, 40]], [[52, 45, 38, 31], [50, 41, 32]], [[53, 46, 39], [51, 42, 33, 24]], [[54, 47], [52, 43, 34, 25, 16]], [[55], [53, 44, 35, 26, 17, 8]], [[54, 45, 36, 27, 18, 9, 0]]], 'R1': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56]], [[2, 3, 4, 5, 6, 7], [0], [9, 17, 25, 33, 41, 49, 57]], [[3, 4, 5, 6, 7], [1, 0], [10, 18, 26, 34, 42, 50, 58]], [[4, 5, 6, 7], [2, 1, 0], [11, 19, 27, 35, 43, 51, 59]], [[5, 6, 7], [3, 2, 1, 0], [12, 20, 28, 36, 44, 52, 60]], [[6, 7], [4, 3, 2, 1, 0], [13, 21, 29, 37, 45, 53, 61]], [[7], [5, 4, 3, 2, 1, 0], [14, 22, 30, 38, 46, 54, 62]], [[6, 5, 4, 3, 2, 1, 0], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [0], [16, 24, 32, 40, 48, 56]], [[10, 11, 12, 13, 14, 15], [1], [8], [17, 25, 33, 41, 49, 57]], [[11, 12, 13, 14, 15], [2], [9, 8], [18, 26, 34, 42, 50, 58]], [[12, 13, 14, 15], [3], [10, 9, 8], [19, 27, 35, 43, 51, 59]], [[13, 
14, 15], [4], [11, 10, 9, 8], [20, 28, 36, 44, 52, 60]], [[14, 15], [5], [12, 11, 10, 9, 8], [21, 29, 37, 45, 53, 61]], [[15], [6], [13, 12, 11, 10, 9, 8], [22, 30, 38, 46, 54, 62]], [[7], [14, 13, 12, 11, 10, 9, 8], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [8, 0], [24, 32, 40, 48, 56]], [[18, 19, 20, 21, 22, 23], [9, 1], [16], [25, 33, 41, 49, 57]], [[19, 20, 21, 22, 23], [10, 2], [17, 16], [26, 34, 42, 50, 58]], [[20, 21, 22, 23], [11, 3], [18, 17, 16], [27, 35, 43, 51, 59]], [[21, 22, 23], [12, 4], [19, 18, 17, 16], [28, 36, 44, 52, 60]], [[22, 23], [13, 5], [20, 19, 18, 17, 16], [29, 37, 45, 53, 61]], [[23], [14, 6], [21, 20, 19, 18, 17, 16], [30, 38, 46, 54, 62]], [[15, 7], [22, 21, 20, 19, 18, 17, 16], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [16, 8, 0], [32, 40, 48, 56]], [[26, 27, 28, 29, 30, 31], [17, 9, 1], [24], [33, 41, 49, 57]], [[27, 28, 29, 30, 31], [18, 10, 2], [25, 24], [34, 42, 50, 58]], [[28, 29, 30, 31], [19, 11, 3], [26, 25, 24], [35, 43, 51, 59]], [[29, 30, 31], [20, 12, 4], [27, 26, 25, 24], [36, 44, 52, 60]], [[30, 31], [21, 13, 5], [28, 27, 26, 25, 24], [37, 45, 53, 61]], [[31], [22, 14, 6], [29, 28, 27, 26, 25, 24], [38, 46, 54, 62]], [[23, 15, 7], [30, 29, 28, 27, 26, 25, 24], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [24, 16, 8, 0], [40, 48, 56]], [[34, 35, 36, 37, 38, 39], [25, 17, 9, 1], [32], [41, 49, 57]], [[35, 36, 37, 38, 39], [26, 18, 10, 2], [33, 32], [42, 50, 58]], [[36, 37, 38, 39], [27, 19, 11, 3], [34, 33, 32], [43, 51, 59]], [[37, 38, 39], [28, 20, 12, 4], [35, 34, 33, 32], [44, 52, 60]], [[38, 39], [29, 21, 13, 5], [36, 35, 34, 33, 32], [45, 53, 61]], [[39], [30, 22, 14, 6], [37, 36, 35, 34, 33, 32], [46, 54, 62]], [[31, 23, 15, 7], [38, 37, 36, 35, 34, 33, 32], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [32, 24, 16, 8, 0], [48, 56]], [[42, 43, 44, 45, 46, 47], [33, 25, 17, 9, 1], [40], [49, 57]], [[43, 44, 45, 46, 47], [34, 26, 18, 10, 2], [41, 40], [50, 58]], [[44, 45, 46, 47], [35, 27, 19, 11, 3], [42, 41, 40], [51, 59]], [[45, 46, 47], [36, 28, 20, 12, 4], [43, 42, 41, 40], [52, 60]], [[46, 47], [37, 29, 21, 13, 5], [44, 43, 42, 41, 40], [53, 61]], [[47], [38, 30, 22, 14, 6], [45, 44, 43, 42, 41, 40], [54, 62]], [[39, 31, 23, 15, 7], [46, 45, 44, 43, 42, 41, 40], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [40, 32, 24, 16, 8, 0], [56]], [[50, 51, 52, 53, 54, 55], [41, 33, 25, 17, 9, 1], [48], [57]], [[51, 52, 53, 54, 55], [42, 34, 26, 18, 10, 2], [49, 48], [58]], [[52, 53, 54, 55], [43, 35, 27, 19, 11, 3], [50, 49, 48], [59]], [[53, 54, 55], [44, 36, 28, 20, 12, 4], [51, 50, 49, 48], [60]], [[54, 55], [45, 37, 29, 21, 13, 5], [52, 51, 50, 49, 48], [61]], [[55], [46, 38, 30, 22, 14, 6], [53, 52, 51, 50, 49, 48], [62]], [[47, 39, 31, 23, 15, 7], [54, 53, 52, 51, 50, 49, 48], [63]], [[57, 58, 59, 60, 61, 62, 63], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [49, 41, 33, 25, 17, 9, 1], [56]], [[59, 60, 61, 62, 63], [50, 42, 34, 26, 18, 10, 2], [57, 56]], [[60, 61, 62, 63], [51, 43, 35, 27, 19, 11, 3], [58, 57, 56]], [[61, 62, 63], [52, 44, 36, 28, 20, 12, 4], [59, 58, 57, 56]], [[62, 63], [53, 45, 37, 29, 21, 13, 5], [60, 59, 58, 57, 56]], [[63], [54, 46, 38, 30, 22, 14, 6], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [62, 61, 60, 59, 58, 57, 56]]], 'R2': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56]], [[2, 3, 4, 5, 6, 7], [0], [9, 17, 25, 33, 41, 49, 57]], [[3, 4, 5, 6, 7], [1, 0], [10, 18, 26, 34, 42, 50, 58]], [[4, 5, 6, 7], [2, 1, 0], [11, 19, 27, 35, 43, 51, 59]], [[5, 6, 
7], [3, 2, 1, 0], [12, 20, 28, 36, 44, 52, 60]], [[6, 7], [4, 3, 2, 1, 0], [13, 21, 29, 37, 45, 53, 61]], [[7], [5, 4, 3, 2, 1, 0], [14, 22, 30, 38, 46, 54, 62]], [[6, 5, 4, 3, 2, 1, 0], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [0], [16, 24, 32, 40, 48, 56]], [[10, 11, 12, 13, 14, 15], [1], [8], [17, 25, 33, 41, 49, 57]], [[11, 12, 13, 14, 15], [2], [9, 8], [18, 26, 34, 42, 50, 58]], [[12, 13, 14, 15], [3], [10, 9, 8], [19, 27, 35, 43, 51, 59]], [[13, 14, 15], [4], [11, 10, 9, 8], [20, 28, 36, 44, 52, 60]], [[14, 15], [5], [12, 11, 10, 9, 8], [21, 29, 37, 45, 53, 61]], [[15], [6], [13, 12, 11, 10, 9, 8], [22, 30, 38, 46, 54, 62]], [[7], [14, 13, 12, 11, 10, 9, 8], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [8, 0], [24, 32, 40, 48, 56]], [[18, 19, 20, 21, 22, 23], [9, 1], [16], [25, 33, 41, 49, 57]], [[19, 20, 21, 22, 23], [10, 2], [17, 16], [26, 34, 42, 50, 58]], [[20, 21, 22, 23], [11, 3], [18, 17, 16], [27, 35, 43, 51, 59]], [[21, 22, 23], [12, 4], [19, 18, 17, 16], [28, 36, 44, 52, 60]], [[22, 23], [13, 5], [20, 19, 18, 17, 16], [29, 37, 45, 53, 61]], [[23], [14, 6], [21, 20, 19, 18, 17, 16], [30, 38, 46, 54, 62]], [[15, 7], [22, 21, 20, 19, 18, 17, 16], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [16, 8, 0], [32, 40, 48, 56]], [[26, 27, 28, 29, 30, 31], [17, 9, 1], [24], [33, 41, 49, 57]], [[27, 28, 29, 30, 31], [18, 10, 2], [25, 24], [34, 42, 50, 58]], [[28, 29, 30, 31], [19, 11, 3], [26, 25, 24], [35, 43, 51, 59]], [[29, 30, 31], [20, 12, 4], [27, 26, 25, 24], [36, 44, 52, 60]], [[30, 31], [21, 13, 5], [28, 27, 26, 25, 24], [37, 45, 53, 61]], [[31], [22, 14, 6], [29, 28, 27, 26, 25, 24], [38, 46, 54, 62]], [[23, 15, 7], [30, 29, 28, 27, 26, 25, 24], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [24, 16, 8, 0], [40, 48, 56]], [[34, 35, 36, 37, 38, 39], [25, 17, 9, 1], [32], [41, 49, 57]], [[35, 36, 37, 38, 39], [26, 18, 10, 2], [33, 32], [42, 50, 58]], [[36, 37, 38, 39], [27, 19, 11, 3], [34, 33, 32], [43, 51, 59]], [[37, 38, 39], [28, 20, 12, 4], [35, 34, 33, 32], [44, 52, 60]], [[38, 39], [29, 21, 13, 5], [36, 35, 34, 33, 32], [45, 53, 61]], [[39], [30, 22, 14, 6], [37, 36, 35, 34, 33, 32], [46, 54, 62]], [[31, 23, 15, 7], [38, 37, 36, 35, 34, 33, 32], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [32, 24, 16, 8, 0], [48, 56]], [[42, 43, 44, 45, 46, 47], [33, 25, 17, 9, 1], [40], [49, 57]], [[43, 44, 45, 46, 47], [34, 26, 18, 10, 2], [41, 40], [50, 58]], [[44, 45, 46, 47], [35, 27, 19, 11, 3], [42, 41, 40], [51, 59]], [[45, 46, 47], [36, 28, 20, 12, 4], [43, 42, 41, 40], [52, 60]], [[46, 47], [37, 29, 21, 13, 5], [44, 43, 42, 41, 40], [53, 61]], [[47], [38, 30, 22, 14, 6], [45, 44, 43, 42, 41, 40], [54, 62]], [[39, 31, 23, 15, 7], [46, 45, 44, 43, 42, 41, 40], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [40, 32, 24, 16, 8, 0], [56]], [[50, 51, 52, 53, 54, 55], [41, 33, 25, 17, 9, 1], [48], [57]], [[51, 52, 53, 54, 55], [42, 34, 26, 18, 10, 2], [49, 48], [58]], [[52, 53, 54, 55], [43, 35, 27, 19, 11, 3], [50, 49, 48], [59]], [[53, 54, 55], [44, 36, 28, 20, 12, 4], [51, 50, 49, 48], [60]], [[54, 55], [45, 37, 29, 21, 13, 5], [52, 51, 50, 49, 48], [61]], [[55], [46, 38, 30, 22, 14, 6], [53, 52, 51, 50, 49, 48], [62]], [[47, 39, 31, 23, 15, 7], [54, 53, 52, 51, 50, 49, 48], [63]], [[57, 58, 59, 60, 61, 62, 63], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [49, 41, 33, 25, 17, 9, 1], [56]], [[59, 60, 61, 62, 63], [50, 42, 34, 26, 18, 10, 2], [57, 56]], [[60, 61, 62, 63], [51, 43, 35, 27, 19, 11, 3], [58, 57, 56]], [[61, 62, 63], 
[52, 44, 36, 28, 20, 12, 4], [59, 58, 57, 56]], [[62, 63], [53, 45, 37, 29, 21, 13, 5], [60, 59, 58, 57, 56]], [[63], [54, 46, 38, 30, 22, 14, 6], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [62, 61, 60, 59, 58, 57, 56]]], 'K': [[[1], [8], [9]], [[2], [0], [8], [9], [10]], [[3], [1], [9], [10], [11]], [[4], [2], [10], [11], [12]], [[5], [3], [11], [12], [13]], [[6], [4], [12], [13], [14]], [[7], [5], [13], [14], [15]], [[6], [14], [15]], [[9], [1], [0], [16], [17]], [[10], [2], [1], [0], [8], [16], [17], [18]], [[11], [3], [2], [1], [9], [17], [18], [19]], [[12], [4], [3], [2], [10], [18], [19], [20]], [[13], [5], [4], [3], [11], [19], [20], [21]], [[14], [6], [5], [4], [12], [20], [21], [22]], [[15], [7], [6], [5], [13], [21], [22], [23]], [[7], [6], [14], [22], [23]], [[17], [9], [8], [24], [25]], [[18], [10], [9], [8], [16], [24], [25], [26]], [[19], [11], [10], [9], [17], [25], [26], [27]], [[20], [12], [11], [10], [18], [26], [27], [28]], [[21], [13], [12], [11], [19], [27], [28], [29]], [[22], [14], [13], [12], [20], [28], [29], [30]], [[23], [15], [14], [13], [21], [29], [30], [31]], [[15], [14], [22], [30], [31]], [[25], [17], [16], [32], [33]], [[26], [18], [17], [16], [24], [32], [33], [34]], [[27], [19], [18], [17], [25], [33], [34], [35]], [[28], [20], [19], [18], [26], [34], [35], [36]], [[29], [21], [20], [19], [27], [35], [36], [37]], [[30], [22], [21], [20], [28], [36], [37], [38]], [[31], [23], [22], [21], [29], [37], [38], [39]], [[23], [22], [30], [38], [39]], [[33], [25], [24], [40], [41]], [[34], [26], [25], [24], [32], [40], [41], [42]], [[35], [27], [26], [25], [33], [41], [42], [43]], [[36], [28], [27], [26], [34], [42], [43], [44]], [[37], [29], [28], [27], [35], [43], [44], [45]], [[38], [30], [29], [28], [36], [44], [45], [46]], [[39], [31], [30], [29], [37], [45], [46], [47]], [[31], [30], [38], [46], [47]], [[41], [33], [32], [48], [49]], [[42], [34], [33], [32], [40], [48], [49], [50]], [[43], [35], [34], [33], [41], [49], [50], [51]], [[44], [36], [35], [34], [42], [50], [51], [52]], [[45], [37], [36], [35], [43], [51], [52], [53]], [[46], [38], [37], [36], [44], [52], [53], [54]], [[47], [39], [38], [37], [45], [53], [54], [55]], [[39], [38], [46], [54], [55]], [[49], [41], [40], [56], [57]], [[50], [42], [41], [40], [48], [56], [57], [58]], [[51], [43], [42], [41], [49], [57], [58], [59]], [[52], [44], [43], [42], [50], [58], [59], [60]], [[53], [45], [44], [43], [51], [59], [60], [61]], [[54], [46], [45], [44], [52], [60], [61], [62]], [[55], [47], [46], [45], [53], [61], [62], [63]], [[47], [46], [54], [62], [63]], [[57], [49], [48]], [[58], [50], [49], [48], [56]], [[59], [51], [50], [49], [57]], [[60], [52], [51], [50], [58]], [[61, 62], [53], [52], [51], [59, 58]], [[62], [54], [53], [52], [60]], [[63], [55], [54], [53], [61]], [[55], [54], [62]]], 'P2': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], 
[37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P3': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P1': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P6': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P7': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], 
[19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P4': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P5': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'P8': [[], [], [], [], [], [], [], [], [[1], [0]], [[2], [1], [0]], [[3], [2], [1]], [[4], [3], [2]], [[5], [4], [3]], [[6], [5], [4]], [[7], [6], [5]], [[7], [6]], [[9], [8]], [[10], [9], [8]], [[11], [10], [9]], [[12], [11], [10]], [[13], [12], [11]], [[14], [13], [12]], [[15], [14], [13]], [[15], [14]], [[17], [16]], [[18], [17], [16]], [[19], [18], [17]], [[20], [19], [18]], [[21], [20], [19]], [[22], [21], [20]], [[23], [22], [21]], [[23], [22]], [[25], [24]], [[26], [25], [24]], [[27], [26], [25]], [[28], [27], [26]], [[29], [28], [27]], [[30], [29], [28]], [[31], [30], [29]], [[31], [30]], [[33], [32]], [[34], [33], [32]], [[35], [34], [33]], [[36], [35], [34]], [[37], [36], [35]], [[38], [37], [36]], [[39], [38], [37]], [[39], [38]], [[41], [40, 32]], [[42], [41, 33], [40]], [[43], [42, 34], [41]], [[44], [43, 35], [42]], [[45], [44, 36], [43]], [[46], [45, 37], [44]], [[47], [46, 38], [45]], [[47, 39], [46]], [], [], [], [], [], [], [], []], 'Q': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56], [9, 18, 27, 36, 45, 54, 63]], 
[[2, 3, 4, 5, 6, 7], [0], [8], [9, 17, 25, 33, 41, 49, 57], [10, 19, 28, 37, 46, 55]], [[3, 4, 5, 6, 7], [1, 0], [9, 16], [10, 18, 26, 34, 42, 50, 58], [11, 20, 29, 38, 47]], [[4, 5, 6, 7], [2, 1, 0], [10, 17, 24], [11, 19, 27, 35, 43, 51, 59], [12, 21, 30, 39]], [[5, 6, 7], [3, 2, 1, 0], [11, 18, 25, 32], [12, 20, 28, 36, 44, 52, 60], [13, 22, 31]], [[6, 7], [4, 3, 2, 1, 0], [12, 19, 26, 33, 40], [13, 21, 29, 37, 45, 53, 61], [14, 23]], [[7], [5, 4, 3, 2, 1, 0], [13, 20, 27, 34, 41, 48], [14, 22, 30, 38, 46, 54, 62], [15]], [[6, 5, 4, 3, 2, 1, 0], [14, 21, 28, 35, 42, 49, 56], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [1], [0], [16, 24, 32, 40, 48, 56], [17, 26, 35, 44, 53, 62]], [[10, 11, 12, 13, 14, 15], [2], [1], [0], [8], [16], [17, 25, 33, 41, 49, 57], [18, 27, 36, 45, 54, 63]], [[11, 12, 13, 14, 15], [3], [2], [1], [9, 8], [17, 24], [18, 26, 34, 42, 50, 58], [19, 28, 37, 46, 55]], [[12, 13, 14, 15], [4], [3], [2], [10, 9, 8], [18, 25, 32], [19, 27, 35, 43, 51, 59], [20, 29, 38, 47]], [[13, 14, 15], [5], [4], [3], [11, 10, 9, 8], [19, 26, 33, 40], [20, 28, 36, 44, 52, 60], [21, 30, 39]], [[14, 15], [6], [5], [4], [12, 11, 10, 9, 8], [20, 27, 34, 41, 48], [21, 29, 37, 45, 53, 61], [22, 31]], [[15], [7], [6], [5], [13, 12, 11, 10, 9, 8], [21, 28, 35, 42, 49, 56], [22, 30, 38, 46, 54, 62], [23]], [[7], [6], [14, 13, 12, 11, 10, 9, 8], [22, 29, 36, 43, 50, 57], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [9, 2], [8, 0], [24, 32, 40, 48, 56], [25, 34, 43, 52, 61]], [[18, 19, 20, 21, 22, 23], [10, 3], [9, 1], [8], [16], [24], [25, 33, 41, 49, 57], [26, 35, 44, 53, 62]], [[19, 20, 21, 22, 23], [11, 4], [10, 2], [9, 0], [17, 16], [25, 32], [26, 34, 42, 50, 58], [27, 36, 45, 54, 63]], [[20, 21, 22, 23], [12, 5], [11, 3], [10, 1], [18, 17, 16], [26, 33, 40], [27, 35, 43, 51, 59], [28, 37, 46, 55]], [[21, 22, 23], [13, 6], [12, 4], [11, 2], [19, 18, 17, 16], [27, 34, 41, 48], [28, 36, 44, 52, 60], [29, 38, 47]], [[22, 23], [14, 7], [13, 5], [12, 3], [20, 19, 18, 17, 16], [28, 35, 42, 49, 56], [29, 37, 45, 53, 61], [30, 39]], [[23], [15], [14, 6], [13, 4], [21, 20, 19, 18, 17, 16], [29, 36, 43, 50, 57], [30, 38, 46, 54, 62], [31]], [[15, 7], [14, 5], [22, 21, 20, 19, 18, 17, 16], [30, 37, 44, 51, 58], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [17, 10, 3], [16, 8, 0], [32, 40, 48, 56], [33, 42, 51, 60]], [[26, 27, 28, 29, 30, 31], [18, 11, 4], [17, 9, 1], [16], [24], [32], [33, 41, 49, 57], [34, 43, 52, 61]], [[27, 28, 29, 30, 31], [19, 12, 5], [18, 10, 2], [17, 8], [25, 24], [33, 40], [34, 42, 50, 58], [35, 44, 53, 62]], [[28, 29, 30, 31], [20, 13, 6], [19, 11, 3], [18, 9, 0], [26, 25, 24], [34, 41, 48], [35, 43, 51, 59], [36, 45, 54, 63]], [[29, 30, 31], [21, 14, 7], [20, 12, 4], [19, 10, 1], [27, 26, 25, 24], [35, 42, 49, 56], [36, 44, 52, 60], [37, 46, 55]], [[30, 31], [22, 15], [21, 13, 5], [20, 11, 2], [28, 27, 26, 25, 24], [36, 43, 50, 57], [37, 45, 53, 61], [38, 47]], [[31], [23], [22, 14, 6], [21, 12, 3], [29, 28, 27, 26, 25, 24], [37, 44, 51, 58], [38, 46, 54, 62], [39]], [[23, 15, 7], [22, 13, 4], [30, 29, 28, 27, 26, 25, 24], [38, 45, 52, 59], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [25, 18, 11, 4], [24, 16, 8, 0], [40, 48, 56], [41, 50, 59]], [[34, 35, 36, 37, 38, 39], [26, 19, 12, 5], [25, 17, 9, 1], [24], [32], [40], [41, 49, 57], [42, 51, 60]], [[35, 36, 37, 38, 39], [27, 20, 13, 6], [26, 18, 10, 2], [25, 16], [33, 32], [41, 48], [42, 50, 58], [43, 52, 61]], [[36, 37, 38, 39], [28, 21, 14, 7], [27, 19, 11, 3], [26, 17, 8], 
[34, 33, 32], [42, 49, 56], [43, 51, 59], [44, 53, 62]], [[37, 38, 39], [29, 22, 15], [28, 20, 12, 4], [27, 18, 9, 0], [35, 34, 33, 32], [43, 50, 57], [44, 52, 60], [45, 54, 63]], [[38, 39], [30, 23], [29, 21, 13, 5], [28, 19, 10, 1], [36, 35, 34, 33, 32], [44, 51, 58], [45, 53, 61], [46, 55]], [[39], [31], [30, 22, 14, 6], [29, 20, 11, 2], [37, 36, 35, 34, 33, 32], [45, 52, 59], [46, 54, 62], [47]], [[31, 23, 15, 7], [30, 21, 12, 3], [38, 37, 36, 35, 34, 33, 32], [46, 53, 60], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [33, 26, 19, 12, 5], [32, 24, 16, 8, 0], [48, 56], [49, 58]], [[42, 43, 44, 45, 46, 47], [34, 27, 20, 13, 6], [33, 25, 17, 9, 1], [32], [40], [48], [49, 57], [50, 59]], [[43, 44, 45, 46, 47], [35, 28, 21, 14, 7], [34, 26, 18, 10, 2], [33, 24], [41, 40], [49, 56], [50, 58], [51, 60]], [[44, 45, 46, 47], [36, 29, 22, 15], [35, 27, 19, 11, 3], [34, 25, 16], [42, 41, 40], [50, 57], [51, 59], [52, 61]], [[45, 46, 47], [37, 30, 23], [36, 28, 20, 12, 4], [35, 26, 17, 8], [43, 42, 41, 40], [51, 58], [52, 60], [53, 62]], [[46, 47], [38, 31], [37, 29, 21, 13, 5], [36, 27, 18, 9, 0], [44, 43, 42, 41, 40], [52, 59], [53, 61], [54, 63]], [[47], [39], [38, 30, 22, 14, 6], [37, 28, 19, 10, 1], [45, 44, 43, 42, 41, 40], [53, 60], [54, 62], [55]], [[39, 31, 23, 15, 7], [38, 29, 20, 11, 2], [46, 45, 44, 43, 42, 41, 40], [54, 61], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [41, 34, 27, 20, 13, 6], [40, 32, 24, 16, 8, 0], [56], [57]], [[50, 51, 52, 53, 54, 55], [42, 35, 28, 21, 14, 7], [41, 33, 25, 17, 9, 1], [40], [48], [56], [57], [58]], [[51, 52, 53, 54, 55], [43, 36, 29, 22, 15], [42, 34, 26, 18, 10, 2], [41, 32], [49, 48], [57], [58], [59]], [[52, 53, 54, 55], [44, 37, 30, 23], [43, 35, 27, 19, 11, 3], [42, 33, 24], [50, 49, 48], [58], [59], [60]], [[53, 54, 55], [45, 38, 31], [44, 36, 28, 20, 12, 4], [43, 34, 25, 16], [51, 50, 49, 48], [59], [60], [61]], [[54, 55], [46, 39], [45, 37, 29, 21, 13, 5], [44, 35, 26, 17, 8], [52, 51, 50, 49, 48], [60], [61], [62]], [[55], [47], [46, 38, 30, 22, 14, 6], [45, 36, 27, 18, 9, 0], [53, 52, 51, 50, 49, 48], [61], [62], [63]], [[47, 39, 31, 23, 15, 7], [46, 37, 28, 19, 10, 1], [54, 53, 52, 51, 50, 49, 48], [62], [63]], [[57, 58, 59, 60, 61, 62, 63], [49, 42, 35, 28, 21, 14, 7], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [50, 43, 36, 29, 22, 15], [49, 41, 33, 25, 17, 9, 1], [48], [56]], [[59, 60, 61, 62, 63], [51, 44, 37, 30, 23], [50, 42, 34, 26, 18, 10, 2], [49, 40], [57, 56]], [[60, 61, 62, 63], [52, 45, 38, 31], [51, 43, 35, 27, 19, 11, 3], [50, 41, 32], [58, 57, 56]], [[61, 62, 63], [53, 46, 39], [52, 44, 36, 28, 20, 12, 4], [51, 42, 33, 24], [59, 58, 57, 56]], [[62, 63], [54, 47], [53, 45, 37, 29, 21, 13, 5], [52, 43, 34, 25, 16], [60, 59, 58, 57, 56]], [[63], [55], [54, 46, 38, 30, 22, 14, 6], [53, 44, 35, 26, 17, 8], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [54, 45, 36, 27, 18, 9, 0], [62, 61, 60, 59, 58, 57, 56]]], 'N1': [[[17], [10]], [[16], [18], [11]], [[8], [17], [19], [12]], [[9], [18], [20], [13]], [[10], [19], [21], [14]], [[11], [20], [22], [15]], [[12], [21], [23]], [[13], [22]], [[2], [25], [18]], [[3], [24], [26], [19]], [[4], [0], [16], [25], [27], [20]], [[5], [1], [17], [26], [28], [21]], [[6], [2], [18], [27], [29], [22]], [[7], [3], [19], [28], [30], [23]], [[4], [20], [29], [31]], [[5], [21], [30]], [[10], [1], [33], [26]], [[11], [2], [0], [32], [34], [27]], [[12], [3], [1], [8], [24], [33], [35], [28]], [[13], [4], [2], [9], [25], [34], [36], [29]], [[14], [5], [3], [10], [26], [35], [37], 
[30]], [[15], [6], [4], [11], [27], [36], [38], [31]], [[7], [5], [12], [28], [37], [39]], [[6], [13], [29], [38]], [[18], [9], [41], [34]], [[19], [10], [8], [40], [42], [35]], [[20], [11], [9], [16], [32], [41], [43], [36]], [[21], [12], [10], [17], [33], [42], [44], [37]], [[22], [13], [11], [18], [34], [43], [45], [38]], [[23], [14], [12], [19], [35], [44], [46], [39]], [[15], [13], [20], [36], [45], [47]], [[14], [21], [37], [46]], [[26], [17], [49], [42]], [[27], [18], [16], [48], [50], [43]], [[28], [19], [17], [24], [40], [49], [51], [44]], [[29], [20], [18], [25], [41], [50], [52], [45]], [[30], [21], [19], [26], [42], [51], [53], [46]], [[31], [22], [20], [27], [43], [52], [54], [47]], [[23], [21], [28], [44], [53], [55]], [[22], [29], [45], [54]], [[34], [25], [57], [50]], [[35], [26], [24], [56], [58], [51]], [[36], [27], [25], [32], [48], [57], [59], [52]], [[37], [28], [26], [33], [49], [58], [60], [53]], [[38], [29], [27], [34], [50], [59], [61], [54]], [[39], [30], [28], [35], [51], [60], [62], [55]], [[31], [29], [36], [52], [61], [63]], [[30], [37], [53], [62]], [[42], [33], [58]], [[43], [34], [32], [59]], [[44], [35], [33], [40], [56], [60]], [[45], [36], [34], [41], [57], [61]], [[46], [37], [35], [42], [58], [62]], [[47], [38], [36], [43], [59], [63]], [[39], [37], [44], [60]], [[38], [45], [61]], [[50], [41]], [[51], [42], [40]], [[52], [43], [41], [48]], [[53], [44], [42], [49]], [[54], [45], [43], [50]], [[55], [46], [44], [51]], [[47], [45], [52]], [[46], [53]]], 'N2': [[[17], [10]], [[16], [18], [11]], [[8], [17], [19], [12]], [[9], [18], [20], [13]], [[10], [19], [21], [14]], [[11], [20], [22], [15]], [[12], [21], [23]], [[13], [22]], [[2], [25], [18]], [[3], [24], [26], [19]], [[4], [0], [16], [25], [27], [20]], [[5], [1], [17], [26], [28], [21]], [[6], [2], [18], [27], [29], [22]], [[7], [3], [19], [28], [30], [23]], [[4], [20], [29], [31]], [[5], [21], [30]], [[10], [1], [33], [26]], [[11], [2], [0], [32], [34], [27]], [[12], [3], [1], [8], [24], [33], [35], [28]], [[13], [4], [2], [9], [25], [34], [36], [29]], [[14], [5], [3], [10], [26], [35], [37], [30]], [[15], [6], [4], [11], [27], [36], [38], [31]], [[7], [5], [12], [28], [37], [39]], [[6], [13], [29], [38]], [[18], [9], [41], [34]], [[19], [10], [8], [40], [42], [35]], [[20], [11], [9], [16], [32], [41], [43], [36]], [[21], [12], [10], [17], [33], [42], [44], [37]], [[22], [13], [11], [18], [34], [43], [45], [38]], [[23], [14], [12], [19], [35], [44], [46], [39]], [[15], [13], [20], [36], [45], [47]], [[14], [21], [37], [46]], [[26], [17], [49], [42]], [[27], [18], [16], [48], [50], [43]], [[28], [19], [17], [24], [40], [49], [51], [44]], [[29], [20], [18], [25], [41], [50], [52], [45]], [[30], [21], [19], [26], [42], [51], [53], [46]], [[31], [22], [20], [27], [43], [52], [54], [47]], [[23], [21], [28], [44], [53], [55]], [[22], [29], [45], [54]], [[34], [25], [57], [50]], [[35], [26], [24], [56], [58], [51]], [[36], [27], [25], [32], [48], [57], [59], [52]], [[37], [28], [26], [33], [49], [58], [60], [53]], [[38], [29], [27], [34], [50], [59], [61], [54]], [[39], [30], [28], [35], [51], [60], [62], [55]], [[31], [29], [36], [52], [61], [63]], [[30], [37], [53], [62]], [[42], [33], [58]], [[43], [34], [32], [59]], [[44], [35], [33], [40], [56], [60]], [[45], [36], [34], [41], [57], [61]], [[46], [37], [35], [42], [58], [62]], [[47], [38], [36], [43], [59], [63]], [[39], [37], [44], [60]], [[38], [45], [61]], [[50], [41]], [[51], [42], [40]], [[52], [43], [41], [48]], [[53], [44], [42], [49]], [[54], 
[45], [43], [50]], [[55], [46], [44], [51]], [[47], [45], [52]], [[46], [53]]], 'r1': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56]], [[2, 3, 4, 5, 6, 7], [0], [9, 17, 25, 33, 41, 49, 57]], [[3, 4, 5, 6, 7], [1, 0], [10, 18, 26, 34, 42, 50, 58]], [[4, 5, 6, 7], [2, 1, 0], [11, 19, 27, 35, 43, 51, 59]], [[5, 6, 7], [3, 2, 1, 0], [12, 20, 28, 36, 44, 52, 60]], [[6, 7], [4, 3, 2, 1, 0], [13, 21, 29, 37, 45, 53, 61]], [[7], [5, 4, 3, 2, 1, 0], [14, 22, 30, 38, 46, 54, 62]], [[6, 5, 4, 3, 2, 1, 0], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [0], [16, 24, 32, 40, 48, 56]], [[10, 11, 12, 13, 14, 15], [1], [8], [17, 25, 33, 41, 49, 57]], [[11, 12, 13, 14, 15], [2], [9, 8], [18, 26, 34, 42, 50, 58]], [[12, 13, 14, 15], [3], [10, 9, 8], [19, 27, 35, 43, 51, 59]], [[13, 14, 15], [4], [11, 10, 9, 8], [20, 28, 36, 44, 52, 60]], [[14, 15], [5], [12, 11, 10, 9, 8], [21, 29, 37, 45, 53, 61]], [[15], [6], [13, 12, 11, 10, 9, 8], [22, 30, 38, 46, 54, 62]], [[7], [14, 13, 12, 11, 10, 9, 8], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [8, 0], [24, 32, 40, 48, 56]], [[18, 19, 20, 21, 22, 23], [9, 1], [16], [25, 33, 41, 49, 57]], [[19, 20, 21, 22, 23], [10, 2], [17, 16], [26, 34, 42, 50, 58]], [[20, 21, 22, 23], [11, 3], [18, 17, 16], [27, 35, 43, 51, 59]], [[21, 22, 23], [12, 4], [19, 18, 17, 16], [28, 36, 44, 52, 60]], [[22, 23], [13, 5], [20, 19, 18, 17, 16], [29, 37, 45, 53, 61]], [[23], [14, 6], [21, 20, 19, 18, 17, 16], [30, 38, 46, 54, 62]], [[15, 7], [22, 21, 20, 19, 18, 17, 16], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [16, 8, 0], [32, 40, 48, 56]], [[26, 27, 28, 29, 30, 31], [17, 9, 1], [24], [33, 41, 49, 57]], [[27, 28, 29, 30, 31], [18, 10, 2], [25, 24], [34, 42, 50, 58]], [[28, 29, 30, 31], [19, 11, 3], [26, 25, 24], [35, 43, 51, 59]], [[29, 30, 31], [20, 12, 4], [27, 26, 25, 24], [36, 44, 52, 60]], [[30, 31], [21, 13, 5], [28, 27, 26, 25, 24], [37, 45, 53, 61]], [[31], [22, 14, 6], [29, 28, 27, 26, 25, 24], [38, 46, 54, 62]], [[23, 15, 7], [30, 29, 28, 27, 26, 25, 24], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [24, 16, 8, 0], [40, 48, 56]], [[34, 35, 36, 37, 38, 39], [25, 17, 9, 1], [32], [41, 49, 57]], [[35, 36, 37, 38, 39], [26, 18, 10, 2], [33, 32], [42, 50, 58]], [[36, 37, 38, 39], [27, 19, 11, 3], [34, 33, 32], [43, 51, 59]], [[37, 38, 39], [28, 20, 12, 4], [35, 34, 33, 32], [44, 52, 60]], [[38, 39], [29, 21, 13, 5], [36, 35, 34, 33, 32], [45, 53, 61]], [[39], [30, 22, 14, 6], [37, 36, 35, 34, 33, 32], [46, 54, 62]], [[31, 23, 15, 7], [38, 37, 36, 35, 34, 33, 32], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [32, 24, 16, 8, 0], [48, 56]], [[42, 43, 44, 45, 46, 47], [33, 25, 17, 9, 1], [40], [49, 57]], [[43, 44, 45, 46, 47], [34, 26, 18, 10, 2], [41, 40], [50, 58]], [[44, 45, 46, 47], [35, 27, 19, 11, 3], [42, 41, 40], [51, 59]], [[45, 46, 47], [36, 28, 20, 12, 4], [43, 42, 41, 40], [52, 60]], [[46, 47], [37, 29, 21, 13, 5], [44, 43, 42, 41, 40], [53, 61]], [[47], [38, 30, 22, 14, 6], [45, 44, 43, 42, 41, 40], [54, 62]], [[39, 31, 23, 15, 7], [46, 45, 44, 43, 42, 41, 40], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [40, 32, 24, 16, 8, 0], [56]], [[50, 51, 52, 53, 54, 55], [41, 33, 25, 17, 9, 1], [48], [57]], [[51, 52, 53, 54, 55], [42, 34, 26, 18, 10, 2], [49, 48], [58]], [[52, 53, 54, 55], [43, 35, 27, 19, 11, 3], [50, 49, 48], [59]], [[53, 54, 55], [44, 36, 28, 20, 12, 4], [51, 50, 49, 48], [60]], [[54, 55], [45, 37, 29, 21, 13, 5], [52, 51, 50, 49, 48], [61]], [[55], [46, 38, 30, 22, 14, 6], [53, 52, 51, 50, 49, 48], [62]], [[47, 
39, 31, 23, 15, 7], [54, 53, 52, 51, 50, 49, 48], [63]], [[57, 58, 59, 60, 61, 62, 63], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [49, 41, 33, 25, 17, 9, 1], [56]], [[59, 60, 61, 62, 63], [50, 42, 34, 26, 18, 10, 2], [57, 56]], [[60, 61, 62, 63], [51, 43, 35, 27, 19, 11, 3], [58, 57, 56]], [[61, 62, 63], [52, 44, 36, 28, 20, 12, 4], [59, 58, 57, 56]], [[62, 63], [53, 45, 37, 29, 21, 13, 5], [60, 59, 58, 57, 56]], [[63], [54, 46, 38, 30, 22, 14, 6], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [62, 61, 60, 59, 58, 57, 56]]], 'p2': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'p3': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'p1': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'p6': [[], [], [], [], 
[], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'r2': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56]], [[2, 3, 4, 5, 6, 7], [0], [9, 17, 25, 33, 41, 49, 57]], [[3, 4, 5, 6, 7], [1, 0], [10, 18, 26, 34, 42, 50, 58]], [[4, 5, 6, 7], [2, 1, 0], [11, 19, 27, 35, 43, 51, 59]], [[5, 6, 7], [3, 2, 1, 0], [12, 20, 28, 36, 44, 52, 60]], [[6, 7], [4, 3, 2, 1, 0], [13, 21, 29, 37, 45, 53, 61]], [[7], [5, 4, 3, 2, 1, 0], [14, 22, 30, 38, 46, 54, 62]], [[6, 5, 4, 3, 2, 1, 0], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [0], [16, 24, 32, 40, 48, 56]], [[10, 11, 12, 13, 14, 15], [1], [8], [17, 25, 33, 41, 49, 57]], [[11, 12, 13, 14, 15], [2], [9, 8], [18, 26, 34, 42, 50, 58]], [[12, 13, 14, 15], [3], [10, 9, 8], [19, 27, 35, 43, 51, 59]], [[13, 14, 15], [4], [11, 10, 9, 8], [20, 28, 36, 44, 52, 60]], [[14, 15], [5], [12, 11, 10, 9, 8], [21, 29, 37, 45, 53, 61]], [[15], [6], [13, 12, 11, 10, 9, 8], [22, 30, 38, 46, 54, 62]], [[7], [14, 13, 12, 11, 10, 9, 8], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [8, 0], [24, 32, 40, 48, 56]], [[18, 19, 20, 21, 22, 23], [9, 1], [16], [25, 33, 41, 49, 57]], [[19, 20, 21, 22, 23], [10, 2], [17, 16], [26, 34, 42, 50, 58]], [[20, 21, 22, 23], [11, 3], [18, 17, 16], [27, 35, 43, 51, 59]], [[21, 22, 23], [12, 4], [19, 18, 17, 16], [28, 36, 44, 52, 60]], [[22, 23], [13, 5], [20, 19, 18, 17, 16], [29, 37, 45, 53, 61]], [[23], [14, 6], [21, 20, 19, 18, 17, 16], [30, 38, 46, 54, 62]], [[15, 7], [22, 21, 20, 19, 18, 17, 16], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [16, 8, 0], [32, 40, 48, 56]], [[26, 27, 28, 29, 30, 31], [17, 9, 1], [24], [33, 41, 49, 57]], [[27, 28, 29, 30, 31], [18, 10, 2], [25, 24], [34, 42, 50, 58]], [[28, 29, 30, 31], [19, 11, 3], [26, 25, 24], [35, 43, 51, 59]], [[29, 30, 31], [20, 12, 4], [27, 26, 25, 24], [36, 44, 52, 60]], [[30, 31], [21, 13, 5], [28, 27, 26, 25, 24], [37, 45, 53, 61]], [[31], [22, 14, 6], [29, 28, 27, 26, 25, 24], [38, 46, 54, 62]], [[23, 15, 7], [30, 29, 28, 27, 26, 25, 24], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [24, 16, 8, 0], [40, 48, 56]], [[34, 35, 36, 37, 38, 39], [25, 17, 9, 1], [32], [41, 49, 57]], [[35, 36, 37, 38, 39], [26, 18, 10, 2], [33, 32], [42, 50, 58]], [[36, 37, 38, 39], [27, 19, 11, 3], [34, 33, 32], [43, 51, 59]], [[37, 38, 39], [28, 20, 12, 4], [35, 34, 33, 32], [44, 52, 60]], [[38, 39], [29, 21, 13, 5], [36, 35, 34, 33, 32], [45, 53, 61]], [[39], [30, 22, 14, 6], [37, 36, 35, 34, 33, 32], [46, 54, 62]], [[31, 23, 15, 7], [38, 37, 36, 35, 34, 33, 32], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [32, 24, 16, 8, 0], [48, 56]], [[42, 43, 44, 45, 46, 
47], [33, 25, 17, 9, 1], [40], [49, 57]], [[43, 44, 45, 46, 47], [34, 26, 18, 10, 2], [41, 40], [50, 58]], [[44, 45, 46, 47], [35, 27, 19, 11, 3], [42, 41, 40], [51, 59]], [[45, 46, 47], [36, 28, 20, 12, 4], [43, 42, 41, 40], [52, 60]], [[46, 47], [37, 29, 21, 13, 5], [44, 43, 42, 41, 40], [53, 61]], [[47], [38, 30, 22, 14, 6], [45, 44, 43, 42, 41, 40], [54, 62]], [[39, 31, 23, 15, 7], [46, 45, 44, 43, 42, 41, 40], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [40, 32, 24, 16, 8, 0], [56]], [[50, 51, 52, 53, 54, 55], [41, 33, 25, 17, 9, 1], [48], [57]], [[51, 52, 53, 54, 55], [42, 34, 26, 18, 10, 2], [49, 48], [58]], [[52, 53, 54, 55], [43, 35, 27, 19, 11, 3], [50, 49, 48], [59]], [[53, 54, 55], [44, 36, 28, 20, 12, 4], [51, 50, 49, 48], [60]], [[54, 55], [45, 37, 29, 21, 13, 5], [52, 51, 50, 49, 48], [61]], [[55], [46, 38, 30, 22, 14, 6], [53, 52, 51, 50, 49, 48], [62]], [[47, 39, 31, 23, 15, 7], [54, 53, 52, 51, 50, 49, 48], [63]], [[57, 58, 59, 60, 61, 62, 63], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [49, 41, 33, 25, 17, 9, 1], [56]], [[59, 60, 61, 62, 63], [50, 42, 34, 26, 18, 10, 2], [57, 56]], [[60, 61, 62, 63], [51, 43, 35, 27, 19, 11, 3], [58, 57, 56]], [[61, 62, 63], [52, 44, 36, 28, 20, 12, 4], [59, 58, 57, 56]], [[62, 63], [53, 45, 37, 29, 21, 13, 5], [60, 59, 58, 57, 56]], [[63], [54, 46, 38, 30, 22, 14, 6], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [62, 61, 60, 59, 58, 57, 56]]], 'p4': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'p5': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []], 'k': [[[1], [8], [9]], [[2], [0], [8], [9], [10]], [[3], [1], [9], [10], [11]], [[4], [2], [10], [11], [12]], [[5, 6], [3, 2], 
[11], [12], [13]], [[6], [4], [12], [13], [14]], [[7], [5], [13], [14], [15]], [[6], [14], [15]], [[9], [1], [0], [16], [17]], [[10], [2], [1], [0], [8], [16], [17], [18]], [[11], [3], [2], [1], [9], [17], [18], [19]], [[12], [4], [3], [2], [10], [18], [19], [20]], [[13], [5], [4], [3], [11], [19], [20], [21]], [[14], [6], [5], [4], [12], [20], [21], [22]], [[15], [7], [6], [5], [13], [21], [22], [23]], [[7], [6], [14], [22], [23]], [[17], [9], [8], [24], [25]], [[18], [10], [9], [8], [16], [24], [25], [26]], [[19], [11], [10], [9], [17], [25], [26], [27]], [[20], [12], [11], [10], [18], [26], [27], [28]], [[21], [13], [12], [11], [19], [27], [28], [29]], [[22], [14], [13], [12], [20], [28], [29], [30]], [[23], [15], [14], [13], [21], [29], [30], [31]], [[15], [14], [22], [30], [31]], [[25], [17], [16], [32], [33]], [[26], [18], [17], [16], [24], [32], [33], [34]], [[27], [19], [18], [17], [25], [33], [34], [35]], [[28], [20], [19], [18], [26], [34], [35], [36]], [[29], [21], [20], [19], [27], [35], [36], [37]], [[30], [22], [21], [20], [28], [36], [37], [38]], [[31], [23], [22], [21], [29], [37], [38], [39]], [[23], [22], [30], [38], [39]], [[33], [25], [24], [40], [41]], [[34], [26], [25], [24], [32], [40], [41], [42]], [[35], [27], [26], [25], [33], [41], [42], [43]], [[36], [28], [27], [26], [34], [42], [43], [44]], [[37], [29], [28], [27], [35], [43], [44], [45]], [[38], [30], [29], [28], [36], [44], [45], [46]], [[39], [31], [30], [29], [37], [45], [46], [47]], [[31], [30], [38], [46], [47]], [[41], [33], [32], [48], [49]], [[42], [34], [33], [32], [40], [48], [49], [50]], [[43], [35], [34], [33], [41], [49], [50], [51]], [[44], [36], [35], [34], [42], [50], [51], [52]], [[45], [37], [36], [35], [43], [51], [52], [53]], [[46], [38], [37], [36], [44], [52], [53], [54]], [[47], [39], [38], [37], [45], [53], [54], [55]], [[39], [38], [46], [54], [55]], [[49], [41], [40], [56], [57]], [[50], [42], [41], [40], [48], [56], [57], [58]], [[51], [43], [42], [41], [49], [57], [58], [59]], [[52], [44], [43], [42], [50], [58], [59], [60]], [[53], [45], [44], [43], [51], [59], [60], [61]], [[54], [46], [45], [44], [52], [60], [61], [62]], [[55], [47], [46], [45], [53], [61], [62], [63]], [[47], [46], [54], [62], [63]], [[57], [49], [48]], [[58], [50], [49], [48], [56]], [[59], [51], [50], [49], [57]], [[60], [52], [51], [50], [58]], [[61], [53], [52], [51], [59]], [[62], [54], [53], [52], [60]], [[63], [55], [54], [53], [61]], [[55], [54], [62]]], 'q': [[[1, 2, 3, 4, 5, 6, 7], [8, 16, 24, 32, 40, 48, 56], [9, 18, 27, 36, 45, 54, 63]], [[2, 3, 4, 5, 6, 7], [0], [8], [9, 17, 25, 33, 41, 49, 57], [10, 19, 28, 37, 46, 55]], [[3, 4, 5, 6, 7], [1, 0], [9, 16], [10, 18, 26, 34, 42, 50, 58], [11, 20, 29, 38, 47]], [[4, 5, 6, 7], [2, 1, 0], [10, 17, 24], [11, 19, 27, 35, 43, 51, 59], [12, 21, 30, 39]], [[5, 6, 7], [3, 2, 1, 0], [11, 18, 25, 32], [12, 20, 28, 36, 44, 52, 60], [13, 22, 31]], [[6, 7], [4, 3, 2, 1, 0], [12, 19, 26, 33, 40], [13, 21, 29, 37, 45, 53, 61], [14, 23]], [[7], [5, 4, 3, 2, 1, 0], [13, 20, 27, 34, 41, 48], [14, 22, 30, 38, 46, 54, 62], [15]], [[6, 5, 4, 3, 2, 1, 0], [14, 21, 28, 35, 42, 49, 56], [15, 23, 31, 39, 47, 55, 63]], [[9, 10, 11, 12, 13, 14, 15], [1], [0], [16, 24, 32, 40, 48, 56], [17, 26, 35, 44, 53, 62]], [[10, 11, 12, 13, 14, 15], [2], [1], [0], [8], [16], [17, 25, 33, 41, 49, 57], [18, 27, 36, 45, 54, 63]], [[11, 12, 13, 14, 15], [3], [2], [1], [9, 8], [17, 24], [18, 26, 34, 42, 50, 58], [19, 28, 37, 46, 55]], [[12, 13, 14, 15], [4], [3], [2], [10, 9, 8], [18, 25, 32], 
[19, 27, 35, 43, 51, 59], [20, 29, 38, 47]], [[13, 14, 15], [5], [4], [3], [11, 10, 9, 8], [19, 26, 33, 40], [20, 28, 36, 44, 52, 60], [21, 30, 39]], [[14, 15], [6], [5], [4], [12, 11, 10, 9, 8], [20, 27, 34, 41, 48], [21, 29, 37, 45, 53, 61], [22, 31]], [[15], [7], [6], [5], [13, 12, 11, 10, 9, 8], [21, 28, 35, 42, 49, 56], [22, 30, 38, 46, 54, 62], [23]], [[7], [6], [14, 13, 12, 11, 10, 9, 8], [22, 29, 36, 43, 50, 57], [23, 31, 39, 47, 55, 63]], [[17, 18, 19, 20, 21, 22, 23], [9, 2], [8, 0], [24, 32, 40, 48, 56], [25, 34, 43, 52, 61]], [[18, 19, 20, 21, 22, 23], [10, 3], [9, 1], [8], [16], [24], [25, 33, 41, 49, 57], [26, 35, 44, 53, 62]], [[19, 20, 21, 22, 23], [11, 4], [10, 2], [9, 0], [17, 16], [25, 32], [26, 34, 42, 50, 58], [27, 36, 45, 54, 63]], [[20, 21, 22, 23], [12, 5], [11, 3], [10, 1], [18, 17, 16], [26, 33, 40], [27, 35, 43, 51, 59], [28, 37, 46, 55]], [[21, 22, 23], [13, 6], [12, 4], [11, 2], [19, 18, 17, 16], [27, 34, 41, 48], [28, 36, 44, 52, 60], [29, 38, 47]], [[22, 23], [14, 7], [13, 5], [12, 3], [20, 19, 18, 17, 16], [28, 35, 42, 49, 56], [29, 37, 45, 53, 61], [30, 39]], [[23], [15], [14, 6], [13, 4], [21, 20, 19, 18, 17, 16], [29, 36, 43, 50, 57], [30, 38, 46, 54, 62], [31]], [[15, 7], [14, 5], [22, 21, 20, 19, 18, 17, 16], [30, 37, 44, 51, 58], [31, 39, 47, 55, 63]], [[25, 26, 27, 28, 29, 30, 31], [17, 10, 3], [16, 8, 0], [32, 40, 48, 56], [33, 42, 51, 60]], [[26, 27, 28, 29, 30, 31], [18, 11, 4], [17, 9, 1], [16], [24], [32], [33, 41, 49, 57], [34, 43, 52, 61]], [[27, 28, 29, 30, 31], [19, 12, 5], [18, 10, 2], [17, 8], [25, 24], [33, 40], [34, 42, 50, 58], [35, 44, 53, 62]], [[28, 29, 30, 31], [20, 13, 6], [19, 11, 3], [18, 9, 0], [26, 25, 24], [34, 41, 48], [35, 43, 51, 59], [36, 45, 54, 63]], [[29, 30, 31], [21, 14, 7], [20, 12, 4], [19, 10, 1], [27, 26, 25, 24], [35, 42, 49, 56], [36, 44, 52, 60], [37, 46, 55]], [[30, 31], [22, 15], [21, 13, 5], [20, 11, 2], [28, 27, 26, 25, 24], [36, 43, 50, 57], [37, 45, 53, 61], [38, 47]], [[31], [23], [22, 14, 6], [21, 12, 3], [29, 28, 27, 26, 25, 24], [37, 44, 51, 58], [38, 46, 54, 62], [39]], [[23, 15, 7], [22, 13, 4], [30, 29, 28, 27, 26, 25, 24], [38, 45, 52, 59], [39, 47, 55, 63]], [[33, 34, 35, 36, 37, 38, 39], [25, 18, 11, 4], [24, 16, 8, 0], [40, 48, 56], [41, 50, 59]], [[34, 35, 36, 37, 38, 39], [26, 19, 12, 5], [25, 17, 9, 1], [24], [32], [40], [41, 49, 57], [42, 51, 60]], [[35, 36, 37, 38, 39], [27, 20, 13, 6], [26, 18, 10, 2], [25, 16], [33, 32], [41, 48], [42, 50, 58], [43, 52, 61]], [[36, 37, 38, 39], [28, 21, 14, 7], [27, 19, 11, 3], [26, 17, 8], [34, 33, 32], [42, 49, 56], [43, 51, 59], [44, 53, 62]], [[37, 38, 39], [29, 22, 15], [28, 20, 12, 4], [27, 18, 9, 0], [35, 34, 33, 32], [43, 50, 57], [44, 52, 60], [45, 54, 63]], [[38, 39], [30, 23], [29, 21, 13, 5], [28, 19, 10, 1], [36, 35, 34, 33, 32], [44, 51, 58], [45, 53, 61], [46, 55]], [[39], [31], [30, 22, 14, 6], [29, 20, 11, 2], [37, 36, 35, 34, 33, 32], [45, 52, 59], [46, 54, 62], [47]], [[31, 23, 15, 7], [30, 21, 12, 3], [38, 37, 36, 35, 34, 33, 32], [46, 53, 60], [47, 55, 63]], [[41, 42, 43, 44, 45, 46, 47], [33, 26, 19, 12, 5], [32, 24, 16, 8, 0], [48, 56], [49, 58]], [[42, 43, 44, 45, 46, 47], [34, 27, 20, 13, 6], [33, 25, 17, 9, 1], [32], [40], [48], [49, 57], [50, 59]], [[43, 44, 45, 46, 47], [35, 28, 21, 14, 7], [34, 26, 18, 10, 2], [33, 24], [41, 40], [49, 56], [50, 58], [51, 60]], [[44, 45, 46, 47], [36, 29, 22, 15], [35, 27, 19, 11, 3], [34, 25, 16], [42, 41, 40], [50, 57], [51, 59], [52, 61]], [[45, 46, 47], [37, 30, 23], [36, 28, 20, 12, 4], 
[35, 26, 17, 8], [43, 42, 41, 40], [51, 58], [52, 60], [53, 62]], [[46, 47], [38, 31], [37, 29, 21, 13, 5], [36, 27, 18, 9, 0], [44, 43, 42, 41, 40], [52, 59], [53, 61], [54, 63]], [[47], [39], [38, 30, 22, 14, 6], [37, 28, 19, 10, 1], [45, 44, 43, 42, 41, 40], [53, 60], [54, 62], [55]], [[39, 31, 23, 15, 7], [38, 29, 20, 11, 2], [46, 45, 44, 43, 42, 41, 40], [54, 61], [55, 63]], [[49, 50, 51, 52, 53, 54, 55], [41, 34, 27, 20, 13, 6], [40, 32, 24, 16, 8, 0], [56], [57]], [[50, 51, 52, 53, 54, 55], [42, 35, 28, 21, 14, 7], [41, 33, 25, 17, 9, 1], [40], [48], [56], [57], [58]], [[51, 52, 53, 54, 55], [43, 36, 29, 22, 15], [42, 34, 26, 18, 10, 2], [41, 32], [49, 48], [57], [58], [59]], [[52, 53, 54, 55], [44, 37, 30, 23], [43, 35, 27, 19, 11, 3], [42, 33, 24], [50, 49, 48], [58], [59], [60]], [[53, 54, 55], [45, 38, 31], [44, 36, 28, 20, 12, 4], [43, 34, 25, 16], [51, 50, 49, 48], [59], [60], [61]], [[54, 55], [46, 39], [45, 37, 29, 21, 13, 5], [44, 35, 26, 17, 8], [52, 51, 50, 49, 48], [60], [61], [62]], [[55], [47], [46, 38, 30, 22, 14, 6], [45, 36, 27, 18, 9, 0], [53, 52, 51, 50, 49, 48], [61], [62], [63]], [[47, 39, 31, 23, 15, 7], [46, 37, 28, 19, 10, 1], [54, 53, 52, 51, 50, 49, 48], [62], [63]], [[57, 58, 59, 60, 61, 62, 63], [49, 42, 35, 28, 21, 14, 7], [48, 40, 32, 24, 16, 8, 0]], [[58, 59, 60, 61, 62, 63], [50, 43, 36, 29, 22, 15], [49, 41, 33, 25, 17, 9, 1], [48], [56]], [[59, 60, 61, 62, 63], [51, 44, 37, 30, 23], [50, 42, 34, 26, 18, 10, 2], [49, 40], [57, 56]], [[60, 61, 62, 63], [52, 45, 38, 31], [51, 43, 35, 27, 19, 11, 3], [50, 41, 32], [58, 57, 56]], [[61, 62, 63], [53, 46, 39], [52, 44, 36, 28, 20, 12, 4], [51, 42, 33, 24], [59, 58, 57, 56]], [[62, 63], [54, 47], [53, 45, 37, 29, 21, 13, 5], [52, 43, 34, 25, 16], [60, 59, 58, 57, 56]], [[63], [55], [54, 46, 38, 30, 22, 14, 6], [53, 44, 35, 26, 17, 8], [61, 60, 59, 58, 57, 56]], [[55, 47, 39, 31, 23, 15, 7], [54, 45, 36, 27, 18, 9, 0], [62, 61, 60, 59, 58, 57, 56]]], 'B1': [[[9, 18, 27, 36, 45, 54, 63]], [[8], [10, 19, 28, 37, 46, 55]], [[9, 16], [11, 20, 29, 38, 47]], [[10, 17, 24], [12, 21, 30, 39]], [[11, 18, 25, 32], [13, 22, 31]], [[12, 19, 26, 33, 40], [14, 23]], [[13, 20, 27, 34, 41, 48], [15]], [[14, 21, 28, 35, 42, 49, 56]], [[1], [17, 26, 35, 44, 53, 62]], [[2], [0], [16], [18, 27, 36, 45, 54, 63]], [[3], [1], [17, 24], [19, 28, 37, 46, 55]], [[4], [2], [18, 25, 32], [20, 29, 38, 47]], [[5], [3], [19, 26, 33, 40], [21, 30, 39]], [[6], [4], [20, 27, 34, 41, 48], [22, 31]], [[7], [5], [21, 28, 35, 42, 49, 56], [23]], [[6], [22, 29, 36, 43, 50, 57]], [[9, 2], [25, 34, 43, 52, 61]], [[10, 3], [8], [24], [26, 35, 44, 53, 62]], [[11, 4], [9, 0], [25, 32], [27, 36, 45, 54, 63]], [[12, 5], [10, 1], [26, 33, 40], [28, 37, 46, 55]], [[13, 6], [11, 2], [27, 34, 41, 48], [29, 38, 47]], [[14, 7], [12, 3], [28, 35, 42, 49, 56], [30, 39]], [[15], [13, 4], [29, 36, 43, 50, 57], [31]], [[14, 5], [30, 37, 44, 51, 58]], [[17, 10, 3], [33, 42, 51, 60]], [[18, 11, 4], [16], [32], [34, 43, 52, 61]], [[19, 12, 5], [17, 8], [33, 40], [35, 44, 53, 62]], [[20, 13, 6], [18, 9, 0], [34, 41, 48], [36, 45, 54, 63]], [[21, 14, 7], [19, 10, 1], [35, 42, 49, 56], [37, 46, 55]], [[22, 15], [20, 11, 2], [36, 43, 50, 57], [38, 47]], [[23], [21, 12, 3], [37, 44, 51, 58], [39]], [[22, 13, 4], [38, 45, 52, 59]], [[25, 18, 11, 4], [41, 50, 59]], [[26, 19, 12, 5], [24], [40], [42, 51, 60]], [[27, 20, 13, 6], [25, 16], [41, 48], [43, 52, 61]], [[28, 21, 14, 7], [26, 17, 8], [42, 49, 56], [44, 53, 62]], [[29, 22, 15], [27, 18, 9, 0], [43, 50, 57], [45, 54, 
63]], [[30, 23], [28, 19, 10, 1], [44, 51, 58], [46, 55]], [[31], [29, 20, 11, 2], [45, 52, 59], [47]], [[30, 21, 12, 3], [46, 53, 60]], [[33, 26, 19, 12, 5], [49, 58]], [[34, 27, 20, 13, 6], [32], [48], [50, 59]], [[35, 28, 21, 14, 7], [33, 24], [49, 56], [51, 60]], [[36, 29, 22, 15], [34, 25, 16], [50, 57], [52, 61]], [[37, 30, 23], [35, 26, 17, 8], [51, 58], [53, 62]], [[38, 31], [36, 27, 18, 9, 0], [52, 59], [54, 63]], [[39], [37, 28, 19, 10, 1], [53, 60], [55]], [[38, 29, 20, 11, 2], [54, 61]], [[41, 34, 27, 20, 13, 6], [57]], [[42, 35, 28, 21, 14, 7], [40], [56], [58]], [[43, 36, 29, 22, 15], [41, 32], [57], [59]], [[44, 37, 30, 23], [42, 33, 24], [58], [60]], [[45, 38, 31], [43, 34, 25, 16], [59], [61]], [[46, 39], [44, 35, 26, 17, 8], [60], [62]], [[47], [45, 36, 27, 18, 9, 0], [61], [63]], [[46, 37, 28, 19, 10, 1], [62]], [[49, 42, 35, 28, 21, 14, 7]], [[50, 43, 36, 29, 22, 15], [48]], [[51, 44, 37, 30, 23], [49, 40]], [[52, 45, 38, 31], [50, 41, 32]], [[53, 46, 39], [51, 42, 33, 24]], [[54, 47], [52, 43, 34, 25, 16]], [[55], [53, 44, 35, 26, 17, 8]], [[54, 45, 36, 27, 18, 9, 0]]], 'B2': [[[9, 18, 27, 36, 45, 54, 63]], [[8], [10, 19, 28, 37, 46, 55]], [[9, 16], [11, 20, 29, 38, 47]], [[10, 17, 24], [12, 21, 30, 39]], [[11, 18, 25, 32], [13, 22, 31]], [[12, 19, 26, 33, 40], [14, 23]], [[13, 20, 27, 34, 41, 48], [15]], [[14, 21, 28, 35, 42, 49, 56]], [[1], [17, 26, 35, 44, 53, 62]], [[2], [0], [16], [18, 27, 36, 45, 54, 63]], [[3], [1], [17, 24], [19, 28, 37, 46, 55]], [[4], [2], [18, 25, 32], [20, 29, 38, 47]], [[5], [3], [19, 26, 33, 40], [21, 30, 39]], [[6], [4], [20, 27, 34, 41, 48], [22, 31]], [[7], [5], [21, 28, 35, 42, 49, 56], [23]], [[6], [22, 29, 36, 43, 50, 57]], [[9, 2], [25, 34, 43, 52, 61]], [[10, 3], [8], [24], [26, 35, 44, 53, 62]], [[11, 4], [9, 0], [25, 32], [27, 36, 45, 54, 63]], [[12, 5], [10, 1], [26, 33, 40], [28, 37, 46, 55]], [[13, 6], [11, 2], [27, 34, 41, 48], [29, 38, 47]], [[14, 7], [12, 3], [28, 35, 42, 49, 56], [30, 39]], [[15], [13, 4], [29, 36, 43, 50, 57], [31]], [[14, 5], [30, 37, 44, 51, 58]], [[17, 10, 3], [33, 42, 51, 60]], [[18, 11, 4], [16], [32], [34, 43, 52, 61]], [[19, 12, 5], [17, 8], [33, 40], [35, 44, 53, 62]], [[20, 13, 6], [18, 9, 0], [34, 41, 48], [36, 45, 54, 63]], [[21, 14, 7], [19, 10, 1], [35, 42, 49, 56], [37, 46, 55]], [[22, 15], [20, 11, 2], [36, 43, 50, 57], [38, 47]], [[23], [21, 12, 3], [37, 44, 51, 58], [39]], [[22, 13, 4], [38, 45, 52, 59]], [[25, 18, 11, 4], [41, 50, 59]], [[26, 19, 12, 5], [24], [40], [42, 51, 60]], [[27, 20, 13, 6], [25, 16], [41, 48], [43, 52, 61]], [[28, 21, 14, 7], [26, 17, 8], [42, 49, 56], [44, 53, 62]], [[29, 22, 15], [27, 18, 9, 0], [43, 50, 57], [45, 54, 63]], [[30, 23], [28, 19, 10, 1], [44, 51, 58], [46, 55]], [[31], [29, 20, 11, 2], [45, 52, 59], [47]], [[30, 21, 12, 3], [46, 53, 60]], [[33, 26, 19, 12, 5], [49, 58]], [[34, 27, 20, 13, 6], [32], [48], [50, 59]], [[35, 28, 21, 14, 7], [33, 24], [49, 56], [51, 60]], [[36, 29, 22, 15], [34, 25, 16], [50, 57], [52, 61]], [[37, 30, 23], [35, 26, 17, 8], [51, 58], [53, 62]], [[38, 31], [36, 27, 18, 9, 0], [52, 59], [54, 63]], [[39], [37, 28, 19, 10, 1], [53, 60], [55]], [[38, 29, 20, 11, 2], [54, 61]], [[41, 34, 27, 20, 13, 6], [57]], [[42, 35, 28, 21, 14, 7], [40], [56], [58]], [[43, 36, 29, 22, 15], [41, 32], [57], [59]], [[44, 37, 30, 23], [42, 33, 24], [58], [60]], [[45, 38, 31], [43, 34, 25, 16], [59], [61]], [[46, 39], [44, 35, 26, 17, 8], [60], [62]], [[47], [45, 36, 27, 18, 9, 0], [61], [63]], [[46, 37, 28, 19, 10, 1], [62]], [[49, 42, 35, 
28, 21, 14, 7]], [[50, 43, 36, 29, 22, 15], [48]], [[51, 44, 37, 30, 23], [49, 40]], [[52, 45, 38, 31], [50, 41, 32]], [[53, 46, 39], [51, 42, 33, 24]], [[54, 47], [52, 43, 34, 25, 16]], [[55], [53, 44, 35, 26, 17, 8]], [[54, 45, 36, 27, 18, 9, 0]]], 'n1': [[[17], [10]], [[16], [18], [11]], [[8], [17], [19], [12]], [[9], [18], [20], [13]], [[10], [19], [21], [14]], [[11], [20], [22], [15]], [[12], [21], [23]], [[13], [22]], [[2], [25], [18]], [[3], [24], [26], [19]], [[4], [0], [16], [25], [27], [20]], [[5], [1], [17], [26], [28], [21]], [[6], [2], [18], [27], [29], [22]], [[7], [3], [19], [28], [30], [23]], [[4], [20], [29], [31]], [[5], [21], [30]], [[10], [1], [33], [26]], [[11], [2], [0], [32], [34], [27]], [[12], [3], [1], [8], [24], [33], [35], [28]], [[13], [4], [2], [9], [25], [34], [36], [29]], [[14], [5], [3], [10], [26], [35], [37], [30]], [[15], [6], [4], [11], [27], [36], [38], [31]], [[7], [5], [12], [28], [37], [39]], [[6], [13], [29], [38]], [[18], [9], [41], [34]], [[19], [10], [8], [40], [42], [35]], [[20], [11], [9], [16], [32], [41], [43], [36]], [[21], [12], [10], [17], [33], [42], [44], [37]], [[22], [13], [11], [18], [34], [43], [45], [38]], [[23], [14], [12], [19], [35], [44], [46], [39]], [[15], [13], [20], [36], [45], [47]], [[14], [21], [37], [46]], [[26], [17], [49], [42]], [[27], [18], [16], [48], [50], [43]], [[28], [19], [17], [24], [40], [49], [51], [44]], [[29], [20], [18], [25], [41], [50], [52], [45]], [[30], [21], [19], [26], [42], [51], [53], [46]], [[31], [22], [20], [27], [43], [52], [54], [47]], [[23], [21], [28], [44], [53], [55]], [[22], [29], [45], [54]], [[34], [25], [57], [50]], [[35], [26], [24], [56], [58], [51]], [[36], [27], [25], [32], [48], [57], [59], [52]], [[37], [28], [26], [33], [49], [58], [60], [53]], [[38], [29], [27], [34], [50], [59], [61], [54]], [[39], [30], [28], [35], [51], [60], [62], [55]], [[31], [29], [36], [52], [61], [63]], [[30], [37], [53], [62]], [[42], [33], [58]], [[43], [34], [32], [59]], [[44], [35], [33], [40], [56], [60]], [[45], [36], [34], [41], [57], [61]], [[46], [37], [35], [42], [58], [62]], [[47], [38], [36], [43], [59], [63]], [[39], [37], [44], [60]], [[38], [45], [61]], [[50], [41]], [[51], [42], [40]], [[52], [43], [41], [48]], [[53], [44], [42], [49]], [[54], [45], [43], [50]], [[55], [46], [44], [51]], [[47], [45], [52]], [[46], [53]]], 'n2': [[[17], [10]], [[16], [18], [11]], [[8], [17], [19], [12]], [[9], [18], [20], [13]], [[10], [19], [21], [14]], [[11], [20], [22], [15]], [[12], [21], [23]], [[13], [22]], [[2], [25], [18]], [[3], [24], [26], [19]], [[4], [0], [16], [25], [27], [20]], [[5], [1], [17], [26], [28], [21]], [[6], [2], [18], [27], [29], [22]], [[7], [3], [19], [28], [30], [23]], [[4], [20], [29], [31]], [[5], [21], [30]], [[10], [1], [33], [26]], [[11], [2], [0], [32], [34], [27]], [[12], [3], [1], [8], [24], [33], [35], [28]], [[13], [4], [2], [9], [25], [34], [36], [29]], [[14], [5], [3], [10], [26], [35], [37], [30]], [[15], [6], [4], [11], [27], [36], [38], [31]], [[7], [5], [12], [28], [37], [39]], [[6], [13], [29], [38]], [[18], [9], [41], [34]], [[19], [10], [8], [40], [42], [35]], [[20], [11], [9], [16], [32], [41], [43], [36]], [[21], [12], [10], [17], [33], [42], [44], [37]], [[22], [13], [11], [18], [34], [43], [45], [38]], [[23], [14], [12], [19], [35], [44], [46], [39]], [[15], [13], [20], [36], [45], [47]], [[14], [21], [37], [46]], [[26], [17], [49], [42]], [[27], [18], [16], [48], [50], [43]], [[28], [19], [17], [24], [40], [49], [51], [44]], [[29], [20], [18], [25], 
[41], [50], [52], [45]], [[30], [21], [19], [26], [42], [51], [53], [46]], [[31], [22], [20], [27], [43], [52], [54], [47]], [[23], [21], [28], [44], [53], [55]], [[22], [29], [45], [54]], [[34], [25], [57], [50]], [[35], [26], [24], [56], [58], [51]], [[36], [27], [25], [32], [48], [57], [59], [52]], [[37], [28], [26], [33], [49], [58], [60], [53]], [[38], [29], [27], [34], [50], [59], [61], [54]], [[39], [30], [28], [35], [51], [60], [62], [55]], [[31], [29], [36], [52], [61], [63]], [[30], [37], [53], [62]], [[42], [33], [58]], [[43], [34], [32], [59]], [[44], [35], [33], [40], [56], [60]], [[45], [36], [34], [41], [57], [61]], [[46], [37], [35], [42], [58], [62]], [[47], [38], [36], [43], [59], [63]], [[39], [37], [44], [60]], [[38], [45], [61]], [[50], [41]], [[51], [42], [40]], [[52], [43], [41], [48]], [[53], [44], [42], [49]], [[54], [45], [43], [50]], [[55], [46], [44], [51]], [[47], [45], [52]], [[46], [53]]], 'p8': [[], [], [], [], [], [], [], [], [[16, 24], [17]], [[16], [17, 25], [18]], [[17], [18, 26], [19]], [[18], [19, 27], [20]], [[19], [20, 28], [21]], [[20], [21, 29], [22]], [[21], [22, 30], [23]], [[22], [23, 31]], [[24], [25]], [[24], [25], [26]], [[25], [26], [27]], [[26], [27], [28]], [[27], [28], [29]], [[28], [29], [30]], [[29], [30], [31]], [[30], [31]], [[32], [33]], [[32], [33], [34]], [[33], [34], [35]], [[34], [35], [36]], [[35], [36], [37]], [[36], [37], [38]], [[37], [38], [39]], [[38], [39]], [[40], [41]], [[40], [41], [42]], [[41], [42], [43]], [[42], [43], [44]], [[43], [44], [45]], [[44], [45], [46]], [[45], [46], [47]], [[46], [47]], [[48], [49]], [[48], [49], [50]], [[49], [50], [51]], [[50], [51], [52]], [[51], [52], [53]], [[52], [53], [54]], [[53], [54], [55]], [[54], [55]], [[56], [57]], [[56], [57], [58]], [[57], [58], [59]], [[58], [59], [60]], [[59], [60], [61]], [[60], [61], [62]], [[61], [62], [63]], [[62], [63]], [], [], [], [], [], [], [], []]}
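# Note on the table above: MOVES is a precomputed lookup keyed by piece name
# ('k', 'q', 'r1', 'B2', 'n1', 'p4', ...); for each source square 0-63 it stores the
# candidate target squares, grouped so that sliding pieces list each ray outward
# from the source. valid_moves() below filters these candidates against the board state.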
def valid_moves(state,bp_movement):
    global MOVES
    possible_moves=dict()
    bk_movement=0
    br_movement=[0,0]
    for i in range(64):
        if(state[i].islower() and state[i]!='null'):
            if(state[i] in {'n1','n2','N1','N2'}):
                possible_moves[state[i]]=list()
                for j in MOVES[state[i]][i]:
                    for k in j:
                        if(state[k].islower() and state[k]!='null'):
                            pass
                        else:
                            possible_moves[state[i]].append([i,k])
            else:
                possible_moves[state[i]]=list()
                for j in MOVES[state[i]][i]:
                    for k in j:
                        possibility=1
                        if(state[k].islower() and state[k]!='null'):
                            possibility=0
                        else:
                            x1=i%8
                            x2=k%8
                            y1=i//8
                            y2=k//8
                            diff=k-i
                            if(x1==x2):
                                if(y2>y1):
                                    facter=8
                                else:
                                    facter=-8
                            elif(y1==y2):
                                if(x2>x1):
                                    facter=1
                                else:
                                    facter=-1
                            elif(diff%7==0):
                                if(y2>y1):
                                    facter=7
                                else:
                                    facter=-7
                            else:
                                if(x2>x1):
                                    facter=9
                                else:
                                    facter=-9
                            l=i+facter
                            while(l!=k):
                                if(state[l]!='null'):
                                    possibility=0
                                l=l+facter
                            # for cross_kill of pawn and en_passant
                            if(state[i][0]=='p'):
                                if(i>15 and k-i==16):
                                    possibility=0
                                elif(k-i==9):
                                    if(state[i+1][0]=='P' and bp_movement[x1+1]==1):
                                        possibility=1
                                    else:
                                        if(state[k]=='null'):
                                            possibility=0
                                elif(k-i==7):
                                    if(state[i-1][0]=='P' and bp_movement[x1-1]==1):
                                        possibility=1
                                    else:
                                        if(state[k]=='null'):
                                            possibility=0
                        if(possibility==1):
                            possible_moves[state[i]].append([i,k])
                        else:
                            pass
    # for castling
    if(state[4]=='k' and state[2]=='null' and state[3]=='null'):
        if(bk_movement==0 and br_movement[0]==0 and state[0]=='r1' and state[1]=='null'):
            pass
        else:
            possible_moves['k'].remove([4,2])
    if(state[4]=='k' and state[5]=='null' and state[6]=='null'):
        if(bk_movement==0 and br_movement[1]==0 and state[7]=='r2'):
            pass
        else:
            possible_moves['k'].remove([4,6])
    # restricting a pawn not to kill straight
    for i in {'p1','p2','p3','p4','p5','p6','p7','p8'}:
        try:
            # iterate over a copy so that removals do not skip entries
            for j in list(possible_moves[i]):
                if((j[1]-j[0])%8==0 and state[j[1]]!='null'):
                    k=possible_moves[i].index(j)
                    del possible_moves[i][k]
        except KeyError:
            pass
    # drop pieces that ended up with no legal moves (iterate over a copy of the keys)
    for pair in list(possible_moves.keys()):
        if(possible_moves[pair]==[]):
            del possible_moves[pair]
        else:
            pass
    return possible_moves
| 671.351852
| 69,434
| 0.358398
|
76bb16faba426c0f0276db7ab0fece03fbab9473
| 2,459
|
py
|
Python
|
tf-2-workflow/train_model/train.py
|
scott2b/amazon-sagemaker-script-mode
|
ccb7edf0cd9d1e77bd951bfaa48d14dc95ce2aca
|
[
"Apache-2.0"
] | 144
|
2019-02-05T21:03:30.000Z
|
2022-03-24T15:24:32.000Z
|
tf-2-workflow/train_model/train.py
|
kirit93/amazon-sagemaker-script-mode
|
095af07488889bb2655b741749d8740d3e11a49e
|
[
"Apache-2.0"
] | 22
|
2019-03-04T04:18:02.000Z
|
2022-03-09T00:21:36.000Z
|
tf-2-workflow/train_model/train.py
|
kirit93/amazon-sagemaker-script-mode
|
095af07488889bb2655b741749d8740d3e11a49e
|
[
"Apache-2.0"
] | 94
|
2019-02-05T21:03:33.000Z
|
2022-01-16T07:29:15.000Z
|
import argparse
import numpy as np
import os
import tensorflow as tf
from model_def import get_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
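# Note: SM_CHANNEL_TRAIN, SM_CHANNEL_TEST and SM_MODEL_DIR are environment variables
# set by SageMaker inside the training container; when running locally they may be
# unset, in which case the defaults below resolve to None.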
def parse_args():
    parser = argparse.ArgumentParser()

    # hyperparameters sent by the client are passed as command-line arguments to the script
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--learning_rate', type=float, default=0.1)

    # data directories
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))

    # model directory: we will use the default set by SageMaker, /opt/ml/model
    parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))

    return parser.parse_known_args()


def get_train_data(train_dir):
    x_train = np.load(os.path.join(train_dir, 'x_train.npy'))
    y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
    print('x train', x_train.shape, 'y train', y_train.shape)
    return x_train, y_train


def get_test_data(test_dir):
    x_test = np.load(os.path.join(test_dir, 'x_test.npy'))
    y_test = np.load(os.path.join(test_dir, 'y_test.npy'))
    print('x test', x_test.shape, 'y test', y_test.shape)
    return x_test, y_test


if __name__ == "__main__":
    args, _ = parse_args()

    print('Training data location: {}'.format(args.train))
    print('Test data location: {}'.format(args.test))
    x_train, y_train = get_train_data(args.train)
    x_test, y_test = get_test_data(args.test)

    device = '/cpu:0'
    print(device)
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    print('batch_size = {}, epochs = {}, learning rate = {}'.format(batch_size, epochs, learning_rate))

    with tf.device(device):
        model = get_model()
        optimizer = tf.keras.optimizers.SGD(learning_rate)
        model.compile(optimizer=optimizer, loss='mse')
        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                  validation_data=(x_test, y_test))

    # evaluate on test set
    scores = model.evaluate(x_test, y_test, batch_size, verbose=2)
    print("\nTest MSE :", scores)

    # save model
    model.save(args.model_dir + '/1')
| 31.126582
| 103
| 0.661651
|
bc32d65b9d98033ae3edb3bc37c04408370910dd
| 3,136
|
py
|
Python
|
test/test_elements.py
|
sigvaldm/symfem
|
5b51748a49015bb37839d1ca39bb26348b33936a
|
[
"MIT"
] | null | null | null |
test/test_elements.py
|
sigvaldm/symfem
|
5b51748a49015bb37839d1ca39bb26348b33936a
|
[
"MIT"
] | null | null | null |
test/test_elements.py
|
sigvaldm/symfem
|
5b51748a49015bb37839d1ca39bb26348b33936a
|
[
"MIT"
] | null | null | null |
import pytest
import symfem
from symfem import create_element
from symfem.symbolic import subs, x, all_symequal
from .utils import test_elements
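# These tests check that every element registered in symfem.create._elementlist is
# covered by the test_elements matrix, and that creating an element with an
# unsupported order raises ValueError.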
def test_all_tested():
    for e in symfem.create._elementlist:
        for r in e.references:
            if r == "dual polygon":
                continue
            for n in e.names:
                if n in test_elements[r]:
                    break
            else:
                raise ValueError(f"{e.names[0]} on a {r} is not tested")


@pytest.mark.parametrize("ref, element, order", [
    ("triangle", "Hermite", 4),
    ("tetrahedron", "Crouzeix-Raviart", 2)
])
def test_too_high_order(ref, element, order):
    with pytest.raises(ValueError):
        symfem.create_element(ref, element, order)


@pytest.mark.parametrize("ref, element, order", [
    ("triangle", "Hermite", 2),
    ("tetrahedron", "bubble", 3)
])
def test_too_low_order(ref, element, order):
    with pytest.raises(ValueError):
        symfem.create_element(ref, element, order)


@pytest.mark.parametrize(
    ("cell_type", "element_type", "order", "kwargs"),
    [[reference, element, order, kwargs]
     for reference, i in test_elements.items() for element, j in i.items()
     for kwargs, k in j for order in k])
def test_element(
    elements_to_test, cells_to_test, cell_type, element_type, order, kwargs,
    speed
):
    """Run tests for each element."""
    if elements_to_test != "ALL" and element_type not in elements_to_test:
        pytest.skip()
    if cells_to_test != "ALL" and cell_type not in cells_to_test:
        pytest.skip()
    if speed == "fast":
        if order > 2:
            pytest.skip()
        if order == 2 and cell_type in ["tetrahedron", "hexahedron", "prism", "pyramid"]:
            pytest.skip()
    space = create_element(cell_type, element_type, order, **kwargs)
    space.test()


@pytest.mark.parametrize("n_tri", [3, 4, 6, 8])
@pytest.mark.parametrize("order", range(2))
def test_dual_elements(elements_to_test, cells_to_test, n_tri, order):
    if elements_to_test != "ALL" and "dual" not in elements_to_test:
        pytest.skip()
    if cells_to_test != "ALL" and "dual polygon" not in cells_to_test:
        pytest.skip()
    space = create_element(f"dual polygon({n_tri})", "dual", order)
    sub_e = create_element("triangle", space.fine_space, space.order)
    for f, coeff_list in zip(space.get_basis_functions(), space.dual_coefficients):
        for piece, coeffs in zip(f.pieces, coeff_list):
            map = sub_e.reference.get_map_to(piece[0])
            for dof, value in zip(sub_e.dofs, coeffs):
                point = subs(map, x, dof.point)
                assert all_symequal(value, subs(piece[1], x, point))


@pytest.mark.parametrize("n_tri", [3, 4])
@pytest.mark.parametrize("element_type", ["BC", "RBC"])
def test_bc_elements(elements_to_test, cells_to_test, n_tri, element_type):
    if elements_to_test != "ALL" and element_type not in elements_to_test:
        pytest.skip()
    if cells_to_test != "ALL" and "dual polygon" not in cells_to_test:
        pytest.skip()
    create_element(f"dual polygon({n_tri})", element_type, 1)
| 35.235955
| 89
| 0.648916
|
1024b27ee6d88e721c501207317fa21837ff99f9
| 17,628
|
py
|
Python
|
4_validate_model/2_validation_plots.py
|
opentargets/genetics-l2g-scoring
|
79a660ddb86adb20453de8d5589262e429c13d09
|
[
"Apache-2.0"
] | 4
|
2020-05-04T20:03:13.000Z
|
2020-08-12T16:22:54.000Z
|
4_validate_model/2_validation_plots.py
|
opentargets/genetics-l2g-scoring
|
79a660ddb86adb20453de8d5589262e429c13d09
|
[
"Apache-2.0"
] | null | null | null |
4_validate_model/2_validation_plots.py
|
opentargets/genetics-l2g-scoring
|
79a660ddb86adb20453de8d5589262e429c13d09
|
[
"Apache-2.0"
] | 3
|
2020-05-07T20:31:52.000Z
|
2021-11-04T11:09:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
'''
Analyses predictions
'''
import sys
import os
import pandas as pd
from pprint import pprint
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sklearn.metrics as skmet
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import roc_curve, auc
from sklearn.calibration import calibration_curve
import argparse
def main():
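    # Workflow: load predictions and feature importances, then for every
    # (classifier, feature set, training gold-standard) group and every testing
    # gold-standard subset, draw diagnostic plots and accumulate one row of
    # metrics for the classification report.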
    pd.options.mode.chained_assignment = None
    pd.set_option('display.max_columns', 500)
    # Parse args
    global args
    args = parse_args()
    # Allow plots or classification report to be switched off
    do_plots = True
    do_clf_report = True
    # Define metrics for the classification report
    metrics = {
        # Scores requiring predictions only
        'y_pred': {
            'accuracy_score': skmet.accuracy_score,
            'balanced_accuracy_score': skmet.balanced_accuracy_score,
            # 'cohen_kappa_score': skmet.cohen_kappa_score,
            'f1_score': skmet.f1_score,
            # 'hamming_loss': skmet.hamming_loss,
            # 'jaccard_score': skmet.jaccard_score,
            # 'log_loss': skmet.log_loss,
            # 'matthews_corrcoef': skmet.matthews_corrcoef,
            'precision_score': skmet.precision_score,
            'recall_score': skmet.recall_score,
            # 'zero_one_loss': skmet.zero_one_loss
        },
        # Scores requiring class probabilities
        'y_proba': {
            'average_precision_score': skmet.average_precision_score,
            # 'brier_score_loss': skmet.brier_score_loss,
            'roc_auc_score': skmet.roc_auc_score
        }
    }

    #
    # Load --------------------------------------------------------------------
    #

    # Load predictions
    pred = pd.read_parquet(args.in_pred)
    # Load feature importance information
    with open(args.in_ftimp, 'r') as in_h:
        ft_imp = json.load(in_h)

    #
    # Check predictions -------------------------------------------------------
    #

    # Count how many loci in total
    top_loci_count = (
        pred
        .loc[:, ['study_id', 'chrom', 'pos', 'ref', 'alt', 'gene_id']]
        .drop_duplicates()
        .shape[0]
    )
    # Count how many predictions per classifier
    pred_counts = (
        pred
        .groupby(['clf_classifier_name', 'clf_feature_name', 'clf_gold_standard_set'])
        .study_id
        .size()
        .reset_index()
    )
    # Make sure the correct number of loci have been predicted
    # print('Warning: skipping assertation!\n')
    assert (pred_counts['study_id'] == top_loci_count).all()

    #
    # Process predictions -------------------------------------------
    #

    # Initiate classification report
    clf_report = []
    # Group predictions
    pred_grp = pred.groupby([
        'clf_classifier_name',
        'clf_feature_name',
        'clf_gold_standard_set'])
    # Iterate over training groups
    for (clf, ft, gs_training), group in pred_grp:
        print('Processing', clf, ft, gs_training, '...')
        # Make testing gold-standard sets. This is the same as in 1_train_models.py
        gs_sets = {
            'high_medium_low': group['gs_confidence'].isin(['High', 'Medium', 'Low']),
            'high_medium': group['gs_confidence'].isin(['High', 'Medium']),
            'high': group['gs_confidence'].isin(['High']),
            'sumstat_only': group['has_sumstats'] == 1,
            'progem': group['gs_set'] == 'ProGeM',
            't2d': group['gs_set'] == 'T2D Knowledge Portal ',
            'chembl_all': group['gs_set'].isin(['ChEMBL_IV', 'ChEMBL_III', 'ChEMBL_II']),
            'chembl_excl_II': group['gs_set'].isin(['ChEMBL_IV', 'ChEMBL_III']),
            'fauman_twitter': group['gs_set'].isin(['Eric Fauman Twitter']),
            'ot_curated': group['gs_set'].isin(['otg_curated_191108']),
        }
        # Iterate over testing gold-standard sets
        for gs_test, gs_set in gs_sets.items():
            # Subset rows of the group dataset
            group_subset = group.loc[gs_set, :]
            # Skip if empty
            if group_subset.shape[0] == 0:
                print('Warning: gs_test={} subset is empty, skipping...'.format(gs_test))
                continue

            #
            # Make plots ----------------------------------------------------------
            #

            if do_plots:
                # Make outname
                out_name = '{}-{}-{}-{}.figure.png'.format(clf, ft, gs_training, gs_test)
                out_path = os.path.join(*[
                    args.out_plotdir,
                    clf,
                    'training=' + gs_training,
                    out_name
                ])
                os.makedirs(os.path.dirname(out_path), exist_ok=True)
                # Initiate figure
                fig = plt.figure(figsize=(15, 10), dpi=300)
                fig.suptitle(' '.join([
                    clf,
                    'training=' + gs_training,
                    'testing=' + gs_test,
                    ft]))
                grid_spec = gridspec.GridSpec(6, 2)
                # Make plot data
                y_true = group_subset['gold_standard_status'].tolist()
                y_pred = group_subset['y_pred'].tolist()
                y_proba = group_subset['y_proba'].tolist()
                fold_data = []
                for fold_name, fold_grp in group_subset.groupby('clf_fold_name'):
                    fold_data.append({
                        'y_true': fold_grp['gold_standard_status'].tolist(),
                        'y_pred': fold_grp['y_pred'].tolist(),
                        'y_proba': fold_grp['y_proba'].tolist(),
                        'fold_name': fold_name,
                        'ft_imp': ft_imp[clf][ft][gs_training][fold_name]['feature_importances']
                    })
                # Plot precision-recall curve
                ax_prc = plt.subplot(grid_spec[0:2, 0])
                ax_prc = plot_precision_recall_curve(
                    y_true=y_true,
                    probas_pred=y_proba,
                    ax=ax_prc,
                    subtitle=None,
                    fold_data=fold_data)
                fig.add_subplot(ax_prc)
                # Plot ROC curve
                ax_roc = plt.subplot(grid_spec[0:2, 1])
                ax_roc = plot_roc_curve(
                    y_true=y_true,
                    probas_pred=y_proba,
                    ax=ax_roc,
                    subtitle=None,
                    fold_data=fold_data)
                fig.add_subplot(ax_roc)
                # Plot calibration curve
                ax_cal_curve = plt.subplot(grid_spec[3:5, 0])
                ax_cal_hist = plt.subplot(grid_spec[5, 0])
                ax_cal_curve, ax_cal_hist = plot_calibration_curve(
                    y_true=y_true,
                    probas_pred=y_proba,
                    ax_curve=ax_cal_curve,
                    ax_hist=ax_cal_hist,
                    subtitle=None,
                    fold_data=fold_data)
                fig.add_subplot(ax_cal_curve)
                fig.add_subplot(ax_cal_hist)
                # Feature importances
                ax_ftimp = plt.subplot(grid_spec[3:5, 1])
                ax_ftimp = plot_feature_importances(
                    fold_data=fold_data,
                    feature_names=ft_imp[clf][ft]['feature_names'],
                    ax=ax_ftimp,
                    subtitle=None)
                fig.add_subplot(ax_ftimp)
                # Plot and save figure
                plt.savefig(out_path)
                plt.close()

            #
            # Make classification report ------------------------------------------
            #

            if do_clf_report:
                # Initiate output for this row
                clf_row = {
                    'clf_name': clf,
                    'feature_set': ft,
                    'goldstandard_training': gs_training,
                    'goldstandard_testing': gs_test
                }
                # Calculate metrics for y_pred and y_proba
                for metric_type in metrics:
                    for metric in metrics[metric_type]:
                        # Calc
                        score = metrics[metric_type][metric](
                            group_subset['gold_standard_status'].tolist(),
                            group_subset[metric_type].tolist())
                        # Add to report
                        clf_row[metric] = score
                # Calculate confusion matrix
                tn, fp, fn, tp = skmet.confusion_matrix(
                    group_subset['gold_standard_status'].tolist(),
                    group_subset['y_pred'].tolist()
                ).ravel()
                clf_row['true_negatives'] = tn
                clf_row['false_positives'] = fp
                clf_row['false_negatives'] = fn
                clf_row['true_positives'] = tp
                # Add derivations from confusion matrix
                clf_row['tpr/sensitivity'] = tp / (tp + fn)
                clf_row['tnr/specificity'] = tn / (tn + fp)
                clf_row['fpr/fallout'] = fp / (fp + tn)
                clf_row['fnr/missrate'] = fn / (fn + tp)
                clf_row['fdr'] = fp / (fp + tp)
                # Support (number of labels for each class)
                clf_row['support_1'] = (
                    group_subset['gold_standard_status'] == 1
                ).sum()
                clf_row['support_0'] = (
                    group_subset['gold_standard_status'] == 0
                ).sum()
                clf_report.append(clf_row)
            # break

    #
    # Write classification report ---------------------------------------------
    #

    print('Writing classification report...')
    # Convert to df
    clf_df = pd.DataFrame(clf_report)
    # Write
    os.makedirs(os.path.dirname(args.out_report), exist_ok=True)
    clf_df.to_csv(args.out_report, sep='\t', index=True, index_label='idx')

    return 0
def plot_precision_recall_curve(y_true, probas_pred, ax, subtitle,
                                fold_data=None):
    ''' Makes a precision-recall curve
    Params:
        y_true (list): true classes
        probas_pred (list): prediction probability for classes
        subtitle (str): Plot subtitle
        ax (matplotlib.ax): axis
        fold_data (list): data for each individual folds
    Returns:
        ax
    '''
    # Plot main result
    precision, recall, _ = precision_recall_curve(y_true, probas_pred)
    average_precision = average_precision_score(y_true, probas_pred)
    ax.step(recall, precision, color='b', alpha=0.8,
            where='post', label='Overall AP = {:.2f}'.format(average_precision))
    # Plot each fold
    if fold_data:
        for fold in fold_data:
            precision, recall, _ = precision_recall_curve(
                fold['y_true'], fold['y_proba'])
            average_precision = average_precision_score(
                fold['y_true'], fold['y_proba'])
            ax.step(recall, precision, alpha=0.2,
                    where='post', label='{0} AP = {1:.2f}'.format(
                        fold['fold_name'], average_precision)
                    )
    # Add labels
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_ylim([0.0, 1.05])
    ax.set_xlim([0.0, 1.0])
    ax.legend(loc="best", prop={'size': 6})
    if subtitle:
        ax.set_title('Precision-Recall Curve\n{}'.format(subtitle))
    else:
        ax.set_title('Precision-Recall Curve')
    return ax
def plot_roc_curve(y_true, probas_pred, ax, subtitle=None,
                   fold_data=None):
    ''' Makes ROC curve
    Params:
        y_true (list): true classes
        probas_pred (list): prediction probability for classes
        subtitle (str): Plot subtitle
        ax (matplotlib.figure): ax
        fold_data (list): data for each individual folds
    Returns:
        ax
    '''
    ax.plot([0, 1], [0, 1], "k:")
    # Plot main result
    fpr, tpr, _ = roc_curve(y_true, probas_pred)
    score = auc(fpr, tpr)
    ax.plot(fpr, tpr, color='b', alpha=0.8,
            label='Overall AUC = {:.2f}'.format(score))
    # Plot each fold
    for fold in fold_data:
        fpr, tpr, _ = roc_curve(
            fold['y_true'], fold['y_proba'])
        score = auc(fpr, tpr)
        ax.plot(fpr, tpr, alpha=0.2,
                label='{0} AUC = {1:.2f}'.format(
                    fold['fold_name'], score)
                )
    # Add labels
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_ylim([0.0, 1.05])
    ax.set_xlim([0.0, 1.0])
    ax.legend(loc="best", prop={'size': 6})
    if subtitle:
        ax.set_title('Receiver Operating Characteristic\n{}'.format(subtitle))
    else:
        ax.set_title('Receiver Operating Characteristic')
    return ax
def plot_calibration_curve(y_true, probas_pred, ax_curve, ax_hist,
subtitle, fold_data=None, n_bins=10):
''' Makes a calibration curve
Params:
y_true (list): true classes
probas_pred (list): prediction probability for classes
subtitle (str): Plot subtitle
ax_curve (matplotlib.ax): axis to plot the curve on
ax_hist (matplotlib.ax): axis to plot the histogram on
fold_data (list): data for each individual folds
Returns:
ax
'''
# Plot perfectly calibrated line
ax_curve.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
# Plot main result
fraction_of_positives, mean_predicted_value = calibration_curve(
y_true, probas_pred, n_bins=n_bins)
ax_curve.plot(mean_predicted_value, fraction_of_positives, "s-",
label='Overall', alpha=0.8, color='b')
ax_hist.hist(probas_pred, range=(0, 1), bins=10, label='Overall',
histtype="step", lw=2, color='b', alpha=0.8)
# Plot each fold
if fold_data:
for fold in fold_data:
fraction_of_positives, mean_predicted_value = \
calibration_curve(fold['y_true'], fold['y_proba'],
n_bins=n_bins)
ax_curve.plot(mean_predicted_value, fraction_of_positives, "s-",
label=fold['fold_name'], alpha=0.2)
ax_hist.hist(probas_pred, range=(0, 1), bins=10,
label=fold['fold_name'],
histtype="step", lw=2, alpha=0.2)
# Add labels to histogram
ax_hist.set_xlabel('Mean predicted value')
ax_hist.set_ylabel('Count')
ax_hist.set_xlim([0.0, 1.0])
# Add labels to curve
ax_curve.set_xticklabels([])
ax_curve.set_ylabel('Fraction of positives')
ax_curve.set_ylim([0.0, 1.05])
ax_curve.set_xlim([0.0, 1.0])
ax_curve.legend(loc="best", prop={'size': 6})
if subtitle:
        ax_curve.set_title('Calibration Curve\n{}'.format(subtitle))
else:
ax_curve.set_title('Calibration Curve')
return ax_curve, ax_hist
def plot_feature_importances(fold_data, feature_names, ax, subtitle=None):
''' Makes a plot of feature importances
Params:
        fold_data (list of dicts): data for each individual fold
feature_names (list): list of feature names
ax (matplotlib.figure): ax
subtitle (str): Plot subtitle
Returns:
ax
'''
bar_width = 0.5
# Calculate mean feature importance across folds
ft_imps = np.array([fold['ft_imp'] for fold in fold_data])
ft_imps_mean = list(np.mean(ft_imps, axis=0))
# Plot main result
x_min_pos = [x - bar_width for x in range(len(ft_imps_mean))]
x_max_pos = [x + bar_width for x in range(len(ft_imps_mean))]
ax.hlines(ft_imps_mean, x_min_pos, x_max_pos, label='Overall', colors='b', alpha=0.8)
# Plot each fold
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
for i, fold in enumerate(fold_data):
ax.hlines(
fold['ft_imp'],
x_min_pos,
x_max_pos,
label=fold['fold_name'],
color=colors[i],
alpha=0.2
)
# Add horizontal lines
ax.axhline(y=np.max(ft_imps_mean), linestyle='--', alpha=0.2)
ax.axhline(y=np.min(ft_imps_mean), linestyle='--', alpha=0.2)
# Add vertical lines
for pos in x_min_pos[0:1] + x_max_pos:
ax.axvline(pos, linestyle='-', alpha=0.1)
# Add labels
ax.set_ylabel('Importance')
ax.set_xticks(range(len(ft_imps_mean)))
ax.set_xticklabels(feature_names)
ax.xaxis.set_tick_params(rotation=90)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 6})
if subtitle:
        ax.set_title('Feature Importances\n{}'.format(subtitle))
else:
ax.set_title('Feature Importances')
return ax
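# Illustrative sketch only (not part of the pipeline): shows how the plotting
# helpers above fit together; all names and values below are made up.
def _plot_helpers_example():
    import matplotlib.pyplot as plt
    y_true = [0, 1, 1, 0, 1]
    y_proba = [0.2, 0.8, 0.6, 0.4, 0.9]
    folds = [{'fold_name': 'fold0', 'y_true': y_true, 'y_proba': y_proba}]
    fig, (ax_roc, ax_pr) = plt.subplots(1, 2, figsize=(8, 4))
    plot_roc_curve(y_true, y_proba, ax_roc, subtitle='Example', fold_data=folds)
    plot_precision_recall_curve(y_true, y_proba, ax_pr, 'Example', fold_data=folds)
    return fig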
def parse_args():
""" Load command line args """
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--in_pred', metavar="<parquet>", help="Input parquet containing predictions", type=str, required=True)
parser.add_argument('--in_ftimp', metavar="<str>", help="Input json containing feature importances", type=str, required=True)
# Outputs
parser.add_argument('--out_plotdir', metavar="<dir>", help="Output directory to write plots", type=str, required=True)
parser.add_argument('--out_report', metavar="<file>", help="Out path for classification report", type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| 34.769231
| 129
| 0.554005
|
78540cdf82a79ce5df2a465910fb029815d8c1e5
| 1,071
|
py
|
Python
|
_assignments/basics/function/function_parameters_c.py
|
sages-pl/2022-01-pythonsqlalchemy-aptiv
|
1d6d856608e9dbe25b139e8968c48b7f46753b84
|
[
"MIT"
] | null | null | null |
_assignments/basics/function/function_parameters_c.py
|
sages-pl/2022-01-pythonsqlalchemy-aptiv
|
1d6d856608e9dbe25b139e8968c48b7f46753b84
|
[
"MIT"
] | null | null | null |
_assignments/basics/function/function_parameters_c.py
|
sages-pl/2022-01-pythonsqlalchemy-aptiv
|
1d6d856608e9dbe25b139e8968c48b7f46753b84
|
[
"MIT"
] | null | null | null |
"""
* Assignment: Function Parameters Default
* Required: yes
* Complexity: easy
* Lines of code: 4 lines
* Time: 3 min
English:
1. Define function `default` with two parameters
2. Parameter `a` is required
3. Parameter `b` is optional and has default value `None`
4. If only one argument was passed, consider second equal to the first one
5. Return `a` and `b` as a `dict`, i.e. {'a': 1, 'b': 1}
6. Run doctests - all must succeed
Polish:
1. Zdefiniuj funkcję `default` z dwoma parametrami
2. Parametr `a` jest wymagany
3. Parametr `b` jest opcjonalny i ma domyślną wartość `None`
4. Jeżeli tylko jeden argument był podany, przyjmij drugi równy pierwszemu
5. Zwróć `a` i `b` jako `dict`, np. {'a': 1, 'b': 1}
6. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> from inspect import isfunction
>>> isfunction(default)
True
>>> type(default(0,0)) is dict
True
>>> default(1)
{'a': 1, 'b': 1}
>>> default(2, 3)
{'a': 2, 'b': 3}
"""
| 27.461538
| 78
| 0.628385
|
c1fd6b71c4780e71e95cf64f65e3d9ba55cfcde9
| 12,744
|
py
|
Python
|
6-0001-fall-2016/ps3/ps3.py
|
mekilis/MIT-Edx-Ocw
|
cd78a28c64c98fb8abf0ab147bc8db4cf880e980
|
[
"MIT"
] | null | null | null |
6-0001-fall-2016/ps3/ps3.py
|
mekilis/MIT-Edx-Ocw
|
cd78a28c64c98fb8abf0ab147bc8db4cf880e980
|
[
"MIT"
] | null | null | null |
6-0001-fall-2016/ps3/ps3.py
|
mekilis/MIT-Edx-Ocw
|
cd78a28c64c98fb8abf0ab147bc8db4cf880e980
|
[
"MIT"
] | null | null | null |
# 6.0001 Problem Set 3
#
# The 6.0001 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
#
# Name : <your name>
# Collaborators : <your collaborators>
# Time spent : <total time>
import math
import random
import string
VOWELS = 'aeiou*'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print(" ", len(wordlist), "words loaded.")
return wordlist
def get_frequency_dict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def get_word_score(word, n):
"""
Returns the score for a word. Assumes the word is a
valid word.
You may assume that the input word is always either a string of letters,
or the empty string "". You may not assume that the string will only contain
lowercase letters, so you will have to handle uppercase and mixed case strings
appropriately.
The score for a word is the product of two components:
The first component is the sum of the points for letters in the word.
The second component is the larger of:
1, or
7*wordlen - 3*(n-wordlen), where wordlen is the length of the word
and n is the hand length when the word was played
Letters are scored as in Scrabble; A is worth 1, B is
worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.
word: string
n: int >= 0
returns: int >= 0
"""
word = word.lower()
word_length = len(word)
score = 0
for ch in word:
if ch in SCRABBLE_LETTER_VALUES:
score += SCRABBLE_LETTER_VALUES[ch]
score *= max(1, HAND_SIZE*word_length - 3*(n-word_length))
return score
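# Worked example (illustrative): get_word_score('was', 7)
#   letter points: w=4 + a=1 + s=1 = 6
#   second component: max(1, 7*3 - 3*(7-3)) = 9
#   score = 6 * 9 = 54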
#
# Make sure you understand how this function works and what it does!
#
def display_hand(hand):
"""
Displays the letters currently in the hand.
For example:
display_hand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter, end=' ') # print all on the same line
print() # print an empty line
#
# Make sure you understand how this function works and what it does!
# You will need to modify this for Problem #4.
#
def deal_hand(n):
"""
Returns a random hand containing n lowercase letters.
ceil(n/3) letters in the hand should be VOWELS (note,
ceil(n/3) means the smallest integer not less than n/3).
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
num_vowels = int(math.ceil(n / 3))
for i in range(num_vowels):
x = random.choice(VOWELS)
while x == '*' and x in hand:
x = random.choice(VOWELS)
hand[x] = hand.get(x, 0) + 1
for i in range(num_vowels, n):
x = random.choice(CONSONANTS)
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def update_hand(hand, word):
"""
Does NOT assume that hand contains every letter in word at least as
many times as the letter appears in word. Letters in word that don't
appear in hand should be ignored. Letters that appear in word more times
than in hand should never result in a negative count; instead, set the
count in the returned hand to 0 (or remove the letter from the
dictionary, depending on how your code is structured).
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
word = word.lower()
new_hand = hand.copy()
for ch in word:
if ch in new_hand:
new_hand[ch] -= 1
if new_hand[ch] == 0:
del(new_hand[ch])
return new_hand
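# Illustrative example: update_hand({'a':1, 'q':1, 'l':2, 'm':1}, 'quail')
# ignores 'u' and 'i' (not in hand), removes 'a' and 'q', decrements 'l',
# and returns {'l': 1, 'm': 1}.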
#
# Problem #3: Test word validity
#
def is_valid_word(word, hand, word_list):
"""
Returns True if word is in the word_list and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or word_list.
word: string
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: boolean
"""
word = word.lower()
wildcard = '*' in word
match_found = False
if not wildcard:
if word not in word_list:
return False
for ch in word:
if ch not in hand or word.count(ch) > hand[ch]:
return False
# wild card exists
    else:
        # A wildcard word must still be covered by the letters in the hand
        for ch in word:
            if ch not in hand or word.count(ch) > hand[ch]:
                return False
        word_copy = word
        i = word.index('*')
        for ch in 'aeiou':
            word_copy = word_copy[:i] + ch + word_copy[i+1:]
            if word_copy in word_list:
                match_found = True
                break
if wildcard and not match_found:
return False
return True
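# Illustrative example: with hand {'h':1, '*':1, 'n':1, 'e':1, 's':1, 't':1},
# is_valid_word('h*nest', hand, word_list) tries each vowel in place of '*'
# ('hanest', 'henest', ..., 'honest') and returns True as soon as one of the
# candidates (here 'honest') appears in word_list.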
#
# Problem #5: Playing a hand
#
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
n = 0
for k in hand:
v = hand[k]
if v > 0:
n += v
return n
def play_hand(hand, word_list):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word.
* When any word is entered (valid or invalid), it uses up letters
from the hand.
* An invalid word is rejected, and a message is displayed asking
the user to choose another word.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters.
The user can also finish playing the hand by inputing two
exclamation points (the string '!!') instead of a word.
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: the total score for the hand
"""
# BEGIN PSEUDOCODE <-- Remove this comment when you implement this function
    # Keep track of the total score
score = 0
# As long as there are still letters left in the hand:
lettersRanOut = True
n = calculate_handlen(hand)
while n > 0:
# Display the hand
print("\nCurrent Hand:", end=' ')
display_hand(hand)
# Ask user for input
word = input('Enter word, or "!!" to indicate that you are finished: ')
word = word.strip()
# If the input is two exclamation points:
if word == "!!":
# End the game (break out of the loop)
lettersRanOut = False
break
# Otherwise (the input is not two exclamation points):
else:
# If the word is valid:
if is_valid_word(word, hand, word_list):
# Tell the user how many points the word earned,
# and the updated total score
x = get_word_score(word, n)
score += x
print("\"%s\" earned %d points. Total: %d points" % (word, x, score))
# Otherwise (the word is not valid):
else:
# Reject invalid word (print a message)
print("That is not a valid word. Please choose another word.")
# update the user's hand by removing the letters of their inputted word
hand = update_hand(hand, word)
n = calculate_handlen(hand)
# Game is over (user entered '!!' or ran out of letters),
# so tell user the total score
if lettersRanOut:
print("\nRan out of letters.", end=' ')
print("Total score: %d points" % score)
# Return the total score as result of function
return score
#
# Problem #6: Playing a game
#
#
# procedure you will use to substitute a letter in a hand
#
def substitute_hand(hand, letter):
"""
Allow the user to replace all copies of one letter in the hand (chosen by user)
with a new letter chosen from the VOWELS and CONSONANTS at random. The new letter
should be different from user's choice, and should not be any of the letters
already in the hand.
    If the user provides a letter not in the hand, the hand should stay the same.
Has no side effects: does not mutate hand.
For example:
substitute_hand({'h':1, 'e':1, 'l':2, 'o':1}, 'l')
might return:
{'h':1, 'e':1, 'o':1, 'x':2} -> if the new letter is 'x'
The new letter should not be 'h', 'e', 'l', or 'o' since those letters were
already in the hand.
hand: dictionary (string -> int)
letter: string
returns: dictionary (string -> int)
"""
pass # TO DO... Remove this line when you implement this function
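# A minimal sketch of the behaviour described above (illustrative only; the
# assignment intentionally leaves substitute_hand unimplemented). The helper
# name _substitute_hand_sketch is hypothetical.
def _substitute_hand_sketch(hand, letter):
    if letter not in hand:
        return hand.copy()
    # Choose a new letter that is neither the replaced one nor already in hand.
    candidates = [c for c in VOWELS + CONSONANTS
                  if c != letter and c not in hand and c != '*']
    new_letter = random.choice(candidates)
    new_hand = {k: v for k, v in hand.items() if k != letter}
    new_hand[new_letter] = hand[letter]
    return new_hand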
def play_game(word_list):
"""
Allow the user to play a series of hands
* Asks the user to input a total number of hands
* Accumulates the score for each hand into a total score for the
entire series
* For each hand, before playing, ask the user if they want to substitute
one letter for another. If the user inputs 'yes', prompt them for their
desired letter. This can only be done once during the game. Once the
        substitute option is used, the user should not be asked if they want to
substitute letters in the future.
* For each hand, ask the user if they would like to replay the hand.
If the user inputs 'yes', they will replay the hand and keep
the better of the two scores for that hand. This can only be done once
during the game. Once the replay option is used, the user should not
be asked if they want to replay future hands. Replaying the hand does
not count as one of the total number of hands the user initially
wanted to play.
* Note: if you replay a hand, you do not get the option to substitute
a letter - you must play whatever hand you just had.
* Returns the total score for the series of hands
word_list: list of lowercase strings
"""
print("play_game not implemented.") # TO DO... Remove this line when you implement this function
#
# Build data structures used for entire session and play game
# Do not remove the "if __name__ == '__main__':" line - this code is executed
# when the program is run directly, instead of through an import statement
#
if __name__ == '__main__':
word_list = load_words()
#play_hand({'a':1, 'j': 1, 'e': 1, 'f': 1, '*': 1, 'r': 1, 'x': 1}, word_list)
play_hand({'a':1, 'c': 1, 'f': 1, 'i': 1, '*': 1, 't': 1, 'x': 1}, word_list)
play_game(word_list)
play_game(word_list)
| 31.544554
| 213
| 0.591886
|
370924cdfa199c3b19fd232e3a91e25a0272ce05
| 4,252
|
py
|
Python
|
scripts/sync_flake8_versions.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 3
|
2018-04-24T13:31:51.000Z
|
2019-07-09T07:31:43.000Z
|
scripts/sync_flake8_versions.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 354
|
2018-05-26T13:28:49.000Z
|
2022-03-31T01:11:46.000Z
|
scripts/sync_flake8_versions.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 5
|
2018-04-24T13:31:56.000Z
|
2021-10-21T05:06:23.000Z
|
"""
Check that the flake8 (and pandas-dev-flaker) pins are the same in:
- environment.yml
- .pre-commit-config.yaml, in the flake8 hook
- .pre-commit-config.yaml, in the additional dependencies of the yesqa hook
The flake8 hook revision in .pre-commit-config.yaml is taken as the reference revision.
Usage: either
- ``python scripts/sync_flake8_versions.py``, or
- ``pre-commit run sync-flake8-versions --all-files``.
"""
from __future__ import annotations
from dataclasses import (
dataclass,
replace,
)
import sys
from typing import (
Any,
Mapping,
Sequence,
TypeVar,
)
import yaml
@dataclass
class Revision:
name: str
compare: str
version: str
@dataclass
class Revisions:
name: str
pre_commit: Revision | None = None
yesqa: Revision | None = None
environment: Revision | None = None
YamlMapping = Mapping[str, Any]
Repo = TypeVar("Repo", bound=YamlMapping)
COMPARE = ("<=", "==", ">=", "<", ">", "=")
def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> tuple[Repo, YamlMapping]:
for repo in repos:
for hook in repo["hooks"]:
if hook["id"] == hook_name:
return repo, hook
else: # pragma: no cover
raise RuntimeError(f"Repo with hook {hook_name} not found")
def _conda_to_pip_compat(dep):
if dep.compare == "=":
return replace(dep, compare="==")
else:
return dep
def _validate_additional_dependencies(
flake8_additional_dependencies,
environment_additional_dependencies,
) -> None:
for dep in flake8_additional_dependencies:
if dep not in environment_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'enviroment.yml' "
"and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n"
)
sys.exit(1)
def _validate_revisions(revisions):
if revisions.environment != revisions.pre_commit:
sys.stdout.write(
f"{revisions.name} in 'environment.yml' does not "
"match in 'flake8' from 'pre-commit'\n"
)
sys.exit(1)
def _process_dependencies(deps):
for dep in deps:
if isinstance(dep, str):
for compare in COMPARE:
if compare in dep:
pkg, rev = dep.split(compare, maxsplit=1)
yield _conda_to_pip_compat(Revision(pkg, compare, rev))
break
else:
yield from _process_dependencies(dep["pip"])
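# For illustration (hypothetical input): given
#   deps = ["flake8=4.0.1", {"pip": ["pandas-dev-flaker==0.4.0"]}]
# _process_dependencies(deps) yields Revision("flake8", "==", "4.0.1") and
# Revision("pandas-dev-flaker", "==", "0.4.0"); the conda-style "=" pin is
# normalised to "==" by _conda_to_pip_compat.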
def get_revisions(
precommit_config: YamlMapping, environment: YamlMapping
) -> tuple[Revisions, Revisions]:
flake8_revisions = Revisions(name="flake8")
pandas_dev_flaker_revisions = Revisions(name="pandas-dev-flaker")
repos = precommit_config["repos"]
flake8_repo, flake8_hook = _get_repo_hook(repos, "flake8")
flake8_revisions.pre_commit = Revision("flake8", "==", flake8_repo["rev"])
flake8_additional_dependencies = []
for dep in _process_dependencies(flake8_hook.get("additional_dependencies", [])):
if dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.pre_commit = dep
else:
flake8_additional_dependencies.append(dep)
environment_dependencies = environment["dependencies"]
environment_additional_dependencies = []
for dep in _process_dependencies(environment_dependencies):
if dep.name == "flake8":
flake8_revisions.environment = dep
environment_additional_dependencies.append(dep)
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.environment = dep
else:
environment_additional_dependencies.append(dep)
_validate_additional_dependencies(
flake8_additional_dependencies,
environment_additional_dependencies,
)
for revisions in flake8_revisions, pandas_dev_flaker_revisions:
_validate_revisions(revisions)
if __name__ == "__main__":
with open(".pre-commit-config.yaml") as fd:
precommit_config = yaml.safe_load(fd)
with open("environment.yml") as fd:
environment = yaml.safe_load(fd)
get_revisions(precommit_config, environment)
sys.exit(0)
| 29.123288
| 88
| 0.662512
|
5126ba215b52f9a4f5ee29aa91f6c9819cb8872b
| 3,428
|
py
|
Python
|
mineapy/tutorials/e_coli_gem_tutorials.py
|
vpandey-om/mineapy
|
a533196244d17aa69a5846eb6e197bd4899797b0
|
[
"Apache-2.0"
] | null | null | null |
mineapy/tutorials/e_coli_gem_tutorials.py
|
vpandey-om/mineapy
|
a533196244d17aa69a5846eb6e197bd4899797b0
|
[
"Apache-2.0"
] | null | null | null |
mineapy/tutorials/e_coli_gem_tutorials.py
|
vpandey-om/mineapy
|
a533196244d17aa69a5846eb6e197bd4899797b0
|
[
"Apache-2.0"
] | null | null | null |
import mineapy
from cobra.io import load_matlab_model,load_json_model
import pandas as pd
from mineapy.core.taskEnrich import TaskEnrichment
from mineapy.core.thermo_model import ThermoModel_WithoutInfo
from mineapy.core.rxnExp import ReactionExp
import pytfa
from pytfa.io import read_compartment_data, apply_compartment_data, \
read_lexicon, annotate_from_lexicon
from pytfa.io.base import load_thermoDB
from pytfa.thermo.tmodel import ThermoModel
from os.path import join
cobra_model = load_json_model('./models/iJO1366.json')
genes=[g.id for g in cobra_model.genes]
path_to_params = './input/task_enrichment_gem_params.yaml'
context_df=pd.read_csv('./input/context.txt',sep='\t')
condition_df=pd.read_csv('./input/condition.txt',sep='\t')
### working with condition comparisons
gene_reg={'gene_id':condition_df['geneid'].to_list(),'fold_change':condition_df['fold change'].to_list(),'up_cutoff':1.35,'down_cutoff':float(1/2.5)}
reg_analysis=ReactionExp(cobra_model,gene_reg=gene_reg)
gene_exp={'gene_id':context_df['geneid'].to_list(),'exp_val':context_df['exp_val'].to_list(),'high_cutoff':0.15,'low_cutoff':0.15}
exp_analysis=ReactionExp(cobra_model,gene_exp=gene_exp)
#params_rxns={'up_rxns':reg_analysis.up_rxns,'down_rxns':reg_analysis.down_rxns}
params_rxns={'high_rxns':exp_analysis.high_rxns,'low_rxns':exp_analysis.low_rxns}
# 1) enrichment based on cobra model
#import pdb; pdb.set_trace()
#cobra_model.solver='optlang-gurobi'
#cobra_model.solver= 'optlang-cplex'
# task_enrich = TaskEnrichment(cobra_model,path_to_params,params_rxns)
#
# task_enrich.run()
# 2) enrichment based on the thermodynamic (pytfa) model
# Paths
# Paths
path_to_model = join('.','models','iJO1366.json')
thermoDB = join('.','input','thermo_data.thermodb')
path_to_lexicon = join('.','models','iJO1366','lexicon.csv')
path_to_compartment_data = join('.','models','iJO1366','compartment_data.json')
# FBA
model = load_json_model(path_to_model)
fba_solution = model.optimize()
fba_value = fba_solution.objective_value
# Thermo prep
thermo_data = load_thermoDB(thermoDB)
lexicon = read_lexicon(path_to_lexicon)
compartment_data = read_compartment_data(path_to_compartment_data)
# Initialize the cobra_model
tfa_model = ThermoModel(thermo_data, model)
tfa_model.name = 'Tutorial'
# Annotate the cobra_model
annotate_from_lexicon(tfa_model, lexicon)
apply_compartment_data(tfa_model, compartment_data)
tfa_model.prepare()
tfa_model.convert()
# tfa_model.solver.configuration.verbosity = True
tfa_model.logger.setLevel(30)
tfa_solution = tfa_model.optimize()
tfa_value = tfa_solution.objective_value
# It might happen that the model is infeasible. In this case, we can relax
# thermodynamics constraints:
if tfa_value < 0.1:
from pytfa.optim.relaxation import relax_dgo
biomass_rxn = 'Ec_biomass_iJO1366_WT_53p95M'
tfa_model.reactions.get_by_id(biomass_rxn).lower_bound = 0.9 * fba_value
relaxed_model, slack_model, relax_table = relax_dgo(tfa_model, in_place=True)
original_model, tfa_model = tfa_model, relaxed_model
print('Relaxation: ')
print(relax_table)
tfa_solution = tfa_model.optimize()
tfa_value = tfa_solution.objective_value
# Reset the biomass lower bound before running the task enrichment
biomass_rxn = 'Ec_biomass_iJO1366_WT_53p95M'
tfa_model.reactions.get_by_id(biomass_rxn).lower_bound = 0
tfa_model.solver= 'optlang-cplex'
task_enrich = TaskEnrichment(tfa_model,path_to_params,params_rxns)
task_enrich.run()
| 27.869919
| 149
| 0.789673
|
aaa7e40efe58e1d453a2e4c137c3436fb3327c8d
| 42,676
|
py
|
Python
|
django/views/debug.py
|
blaze33/django
|
2f6d887bd0a110e3a662ac1d056d6cdabf38632b
|
[
"BSD-3-Clause"
] | null | null | null |
django/views/debug.py
|
blaze33/django
|
2f6d887bd0a110e3a662ac1d056d6cdabf38632b
|
[
"BSD-3-Clause"
] | null | null | null |
django/views/debug.py
|
blaze33/django
|
2f6d887bd0a110e3a662ac1d056d6cdabf38632b
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.importlib import import_module
from django.utils.encoding import force_bytes, smart_text
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
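# Illustrative example: for template_source = "ab\ncd\n" the generator yields
# 0, 3, 6 and 7 -- the offset of the start of each line, plus a final
# sentinel one past the end of the string.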
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
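# Illustrative examples (not executed here): cleanse_setting('API_KEY', 'abc123')
# returns CLEANSED_SUBSTITUTE because 'API' and 'KEY' match HIDDEN_SETTINGS,
# while cleanse_setting('DEBUG', True) returns True unchanged.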
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponseServerError(text, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponseServerError(html, content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER
modname, classname = modpath.rsplit('.', 1)
try:
mod = import_module(modname)
except ImportError as e:
raise ImproperlyConfigured(
'Error importing default exception reporter filter %s: "%s"' % (modpath, e))
try:
default_exception_reporter_filter = getattr(mod, classname)()
except AttributeError:
raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname))
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
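# Illustrative usage (a sketch, assuming the decorators from
# django.views.decorators.debug are applied to the view):
#
#     from django.views.decorators.debug import (
#         sensitive_post_parameters, sensitive_variables)
#
#     @sensitive_variables('password')
#     @sensitive_post_parameters('password')
#     def login(request):
#         password = request.POST['password']
#         ...
#
# With DEBUG = False, 'password' is then shown as CLEANSED_SUBSTITUTE both in
# the reported POST data and in the traceback frame variables.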
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"Return a Context instance containing traceback information."
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data())
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append( (num, escape(template_source[upto:next])) )
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.readlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': module_name.startswith('django.') and 'django' or 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def empty_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
c = Context({
'project_name': settings.SETTINGS_MODULE.split('.')[0]
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASES</code> setting in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| 38.343217
| 251
| 0.594784
|
2c487bd5efd19f9918ffc7db43580efe51e6a283
| 845
|
py
|
Python
|
bluebottle/projects/migrations/0039_add_project_image_group_permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/projects/migrations/0039_add_project_image_group_permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/projects/migrations/0039_add_project_image_group_permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-09-15 11:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Anonymous': {
'perms': ('api_read_projectimage',)
},
'Authenticated': {
'perms': (
'api_read_projectimage', 'api_add_own_projectimage',
'api_change_own_projectimage', 'api_delete_own_projectimage',
)
}
}
update_group_permissions('projects', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('projects', '0038_auto_20170915_1358'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
| 23.472222
| 77
| 0.639053
|
f38a9550ad41fe79e75d1e72c47974f90854d360
| 10,469
|
py
|
Python
|
src/livestreamer/stream/hls_playlist.py
|
jaccarmac/livestreamer
|
ab80dbd6560f6f9835865b2fc9f9c6015aee5658
|
[
"BSD-2-Clause",
"MIT"
] | 3,614
|
2015-01-01T08:07:27.000Z
|
2022-03-20T00:31:07.000Z
|
src/livestreamer/stream/hls_playlist.py
|
kviktor/livestreamer
|
ab80dbd6560f6f9835865b2fc9f9c6015aee5658
|
[
"BSD-2-Clause",
"MIT"
] | 1,028
|
2015-01-02T03:38:38.000Z
|
2021-08-06T16:17:48.000Z
|
src/livestreamer/stream/hls_playlist.py
|
kviktor/livestreamer
|
ab80dbd6560f6f9835865b2fc9f9c6015aee5658
|
[
"BSD-2-Clause",
"MIT"
] | 795
|
2015-01-02T06:12:04.000Z
|
2022-03-27T23:41:53.000Z
|
import re
from binascii import unhexlify
from collections import namedtuple
from itertools import starmap
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
__all__ = ["load", "M3U8Parser"]
# EXT-X-BYTERANGE
ByteRange = namedtuple("ByteRange", "range offset")
# EXT-X-KEY
Key = namedtuple("Key", "method uri iv key_format key_format_versions")
# EXT-X-MAP
Map = namedtuple("Map", "uri byterange")
# EXT-X-MEDIA
Media = namedtuple("Media", "uri type group_id language name default "
"autoselect forced characteristics")
# EXT-X-START
Start = namedtuple("Start", "time_offset precise")
# EXT-X-STREAM-INF
StreamInfo = namedtuple("StreamInfo", "bandwidth program_id codecs resolution "
"audio video subtitles")
# EXT-X-I-FRAME-STREAM-INF
IFrameStreamInfo = namedtuple("IFrameStreamInfo", "bandwidth program_id "
"codecs resolution video")
Playlist = namedtuple("Playlist", "uri stream_info media is_iframe")
Resolution = namedtuple("Resolution", "width height")
Segment = namedtuple("Segment", "uri duration title key discontinuity "
"byterange date map")
ATTRIBUTE_REGEX = (r"([A-Z\-]+)=(\d+\.\d+|0x[0-9A-z]+|\d+x\d+|\d+|"
r"\"(.+?)\"|[0-9A-z\-]+)")
class M3U8(object):
def __init__(self):
self.is_endlist = False
self.is_master = False
self.allow_cache = None
self.discontinuity_sequence = None
self.iframes_only = None
self.media_sequence = None
self.playlist_type = None
self.target_duration = None
self.start = None
self.version = None
self.media = []
self.playlists = []
self.segments = []
class M3U8Parser(object):
def __init__(self, base_uri=None):
self.base_uri = base_uri
def create_stream_info(self, streaminf, cls=None):
program_id = streaminf.get("PROGRAM-ID")
if program_id:
program_id = int(program_id)
bandwidth = streaminf.get("BANDWIDTH")
if bandwidth:
bandwidth = int(bandwidth)
resolution = streaminf.get("RESOLUTION")
if resolution:
resolution = self.parse_resolution(resolution)
codecs = streaminf.get("CODECS")
if codecs:
codecs = codecs.split(",")
else:
codecs = []
if cls == IFrameStreamInfo:
return IFrameStreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("VIDEO"))
else:
return StreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("AUDIO"), streaminf.get("VIDEO"),
streaminf.get("SUBTITLES"))
def split_tag(self, line):
match = re.match("#(?P<tag>[\w-]+)(:(?P<value>.+))?", line)
if match:
return match.group("tag"), match.group("value").strip()
return None, None
def parse_attributes(self, value):
def map_attribute(key, value, quoted):
return (key, quoted or value)
attr = re.findall(ATTRIBUTE_REGEX, value)
return dict(starmap(map_attribute, attr))
def parse_bool(self, value):
return value == "YES"
def parse_byterange(self, value):
match = re.match("(?P<range>\d+)(@(?P<offset>.+))?", value)
if match:
return ByteRange(int(match.group("range")),
int(match.group("offset") or 0))
def parse_extinf(self, value):
match = re.match("(?P<duration>\d+(\.\d+)?)(,(?P<title>.+))?", value)
if match:
return float(match.group("duration")), match.group("title")
def parse_hex(self, value):
value = value[2:]
if len(value) % 2:
value = "0" + value
return unhexlify(value)
def parse_resolution(self, value):
match = re.match("(\d+)x(\d+)", value)
if match:
width, height = int(match.group(1)), int(match.group(2))
else:
width, height = 0, 0
return Resolution(width, height)
def parse_tag(self, line, transform=None):
tag, value = self.split_tag(line)
if transform:
value = transform(value)
return value
def parse_line(self, lineno, line):
if lineno == 0 and not line.startswith("#EXTM3U"):
raise ValueError("Missing #EXTM3U header")
if not line.startswith("#"):
if self.state.pop("expect_segment", None):
byterange = self.state.pop("byterange", None)
extinf = self.state.pop("extinf", (0, None))
date = self.state.pop("date", None)
map_ = self.state.get("map")
key = self.state.get("key")
segment = Segment(self.uri(line), extinf[0],
extinf[1], key,
self.state.pop("discontinuity", False),
byterange, date, map_)
self.m3u8.segments.append(segment)
elif self.state.pop("expect_playlist", None):
streaminf = self.state.pop("streaminf", {})
stream_info = self.create_stream_info(streaminf)
playlist = Playlist(self.uri(line), stream_info, [], False)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXTINF"):
self.state["expect_segment"] = True
self.state["extinf"] = self.parse_tag(line, self.parse_extinf)
elif line.startswith("#EXT-X-BYTERANGE"):
self.state["expect_segment"] = True
self.state["byterange"] = self.parse_tag(line, self.parse_byterange)
elif line.startswith("#EXT-X-TARGETDURATION"):
self.m3u8.target_duration = self.parse_tag(line, int)
elif line.startswith("#EXT-X-MEDIA-SEQUENCE"):
self.m3u8.media_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-KEY"):
attr = self.parse_tag(line, self.parse_attributes)
iv = attr.get("IV")
if iv: iv = self.parse_hex(iv)
self.state["key"] = Key(attr.get("METHOD"),
self.uri(attr.get("URI")),
iv, attr.get("KEYFORMAT"),
attr.get("KEYFORMATVERSIONS"))
elif line.startswith("#EXT-X-PROGRAM-DATE-TIME"):
self.state["date"] = self.parse_tag(line)
elif line.startswith("#EXT-X-ALLOW-CACHE"):
self.m3u8.allow_cache = self.parse_tag(line, self.parse_bool)
elif line.startswith("#EXT-X-STREAM-INF"):
self.state["streaminf"] = self.parse_tag(line, self.parse_attributes)
self.state["expect_playlist"] = True
elif line.startswith("#EXT-X-PLAYLIST-TYPE"):
self.m3u8.playlist_type = self.parse_tag(line)
elif line.startswith("#EXT-X-ENDLIST"):
self.m3u8.is_endlist = True
elif line.startswith("#EXT-X-MEDIA"):
attr = self.parse_tag(line, self.parse_attributes)
media = Media(self.uri(attr.get("URI")), attr.get("TYPE"),
attr.get("GROUP-ID"), attr.get("LANGUAGE"),
attr.get("NAME"),
self.parse_bool(attr.get("DEFAULT")),
self.parse_bool(attr.get("AUTOSELECT")),
self.parse_bool(attr.get("FORCED")),
attr.get("CHARACTERISTICS"))
self.m3u8.media.append(media)
elif line.startswith("#EXT-X-DISCONTINUITY"):
self.state["discontinuity"] = True
self.state["map"] = None
elif line.startswith("#EXT-X-DISCONTINUITY-SEQUENCE"):
self.m3u8.discontinuity_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-I-FRAMES-ONLY"):
self.m3u8.iframes_only = True
elif line.startswith("#EXT-X-MAP"):
attr = self.parse_tag(line, self.parse_attributes)
byterange = self.parse_byterange(attr.get("BYTERANGE", ""))
self.state["map"] = Map(attr.get("URI"), byterange)
elif line.startswith("#EXT-X-I-FRAME-STREAM-INF"):
attr = self.parse_tag(line, self.parse_attributes)
streaminf = self.state.pop("streaminf", attr)
stream_info = self.create_stream_info(streaminf, IFrameStreamInfo)
playlist = Playlist(self.uri(attr.get("URI")), stream_info, [], True)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXT-X-VERSION"):
self.m3u8.version = self.parse_tag(line, int)
elif line.startswith("#EXT-X-START"):
attr = self.parse_tag(line, self.parse_attributes)
start = Start(attr.get("TIME-OFFSET"),
self.parse_bool(attr.get("PRECISE", "NO")))
self.m3u8.start = start
def parse(self, data):
self.state = {}
self.m3u8 = M3U8()
for lineno, line in enumerate(filter(bool, data.splitlines())):
self.parse_line(lineno, line)
# Associate Media entries with each Playlist
for playlist in self.m3u8.playlists:
for media_type in ("audio", "video", "subtitles"):
group_id = getattr(playlist.stream_info, media_type, None)
if group_id:
for media in filter(lambda m: m.group_id == group_id,
self.m3u8.media):
playlist.media.append(media)
self.m3u8.is_master = not not self.m3u8.playlists
return self.m3u8
def uri(self, uri):
if uri and uri.startswith("http"):
return uri
elif self.base_uri and uri:
return urljoin(self.base_uri, uri)
else:
return uri
def load(data, base_uri=None, parser=M3U8Parser):
"""Attempts to parse a M3U8 playlist from a string of data.
If specified, *base_uri* is the base URI that relative URIs will
be joined together with, otherwise relative URIs will be as is.
If specified, *parser* can be a M3U8Parser subclass to be used
to parse the data.
"""
return parser(base_uri).parse(data)
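# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# A minimal check of load() on an inline media playlist; the sample playlist and
# base URI below are illustrative assumptions.
if __name__ == "__main__":
    sample = "\n".join([
        "#EXTM3U",
        "#EXT-X-VERSION:3",
        "#EXT-X-TARGETDURATION:6",
        "#EXTINF:5.0,first segment",
        "segment0.ts",
        "#EXT-X-ENDLIST",
    ])
    playlist = load(sample, base_uri="http://example.com/stream/")
    for segment in playlist.segments:
        # Relative segment URIs are joined with base_uri by M3U8Parser.uri()
        print(segment.uri, segment.duration)  # http://example.com/stream/segment0.ts 5.0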
| 37.389286
| 81
| 0.568918
|
0b383b28a1c709e0258f40b6e8d78a74eac39eea
| 2,852
|
py
|
Python
|
models/exercise/plot_two_strain_ts.py
|
tethig/sismid
|
6104195619ae2ddbd91921872ffde2b7c83b01ac
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
models/exercise/plot_two_strain_ts.py
|
tethig/sismid
|
6104195619ae2ddbd91921872ffde2b7c83b01ac
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
models/exercise/plot_two_strain_ts.py
|
tethig/sismid
|
6104195619ae2ddbd91921872ffde2b7c83b01ac
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2022-02-21T09:05:32.000Z
|
2022-02-21T09:05:32.000Z
|
################################################################### #
# Basic plot for two-strain SIR model:
# Time series given some initial conditions
####################################################################
import sys
import csv
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
from two_strain import *
# Run parameters
run_num = 1 # sys.argv[1]
end_time = 100*365
output_interval = 1.0
step_size = 0.1
# Strain parameters, including initial conditions
beta = np.array([5, 5])/7.0
epsilon = 0.1
gamma = np.array([1, 1])/7.0
mu = 1/(10*365.0)
alpha = np.array([0.5, 1.])
a = np.array([1., 2.5])
omega = 2*np.pi/365.
obs_sd = 0.01
NSS = 0.2
NIS = 1e-3
NRS = 0.02
NRI = 0.0
NSI = 1e-3
NSR = 0.02
NIR = 0.0
# Organize and run simulation
params = np.array([gamma, mu, alpha, a, omega, beta, epsilon])
SI = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR])
ic = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR, 1-np.sum(SI)])
output = run_two_strain(end_time, output_interval, step_size, params, ic)
# Save output (NIS+NIR, NSI+NRI) to csv and plot
infecteds = np.asarray([output[:, 1] + output[:, 6], output[:, 3] + output[:, 4]])
times = np.arange(0,infecteds.shape[1])
infecteds_t = np.vstack((times, infecteds))
filename = 'infecteds_' + str(run_num) + '.csv'
with open(filename, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['times', 'I1', 'I2'])
writer.writerows(infecteds_t.T)
# Add observation error if present
if obs_sd > 0:
errors = np.random.normal(1, obs_sd, infecteds.shape)
infecteds_obs = infecteds*errors
infecteds_obs_t = np.vstack((times, infecteds_obs))
filename = 'infecteds_obs_' + str(run_num) + '.csv'
with open(filename, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['times', 'I1', 'I2'])
writer.writerows(infecteds_obs_t.T)
plt.subplot(3, 1, 1)
plt.plot(output[:, 0], 'b-', label=r'$N_{SS}$')
plt.plot(output[:, 2], 'g-', label=r'$N_{RS}$')
plt.plot(output[:, 5], 'r-', label=r'$N_{SR}$')
plt.plot(output[:, 7], 'c-', label=r'$N_{RR}$')
plt.xlabel('Time')
plt.ylabel('Uninfected')
plt.legend(loc=1, prop=FontProperties(size='smaller'))
plt.subplot(3, 1, 2)
plt.plot(output[:, 1], 'b-', label=r'$N_{IS}$')
plt.plot(output[:, 6], 'g-', label=r'$N_{IR}$')
plt.plot((output[:, 1]+a[0]*output[:, 6]), 'r-', label=r'$I_1$')
plt.xlabel('Time')
plt.ylabel('Infected 1')
plt.legend(loc=1, prop=FontProperties(size='smaller'))
plt.subplot(3, 1, 3)
plt.plot(output[:, 4], 'b-', label=r'$N_{SI}$')
plt.plot(output[:, 3], 'g-', label=r'$N_{RI}$')
plt.plot((output[:, 4]+a[1]*output[:, 3]), 'r-', label=r'$I_2$')
plt.xlabel('Time')
plt.ylabel('Infected 2')
plt.legend(loc=1, prop=FontProperties(size='smaller'))
plt.savefig("time_series_" + str(run_num) + ".png")
plt.show()
plt.close()
| 31.688889
| 87
| 0.613955
|
5bcabe4cef7c793bd6dcd6a32bdc96ec647b7f5e
| 11,147
|
py
|
Python
|
torchknickknacks/modelutils.py
|
AlGoulas/torchknickknacks
|
d7c70b7af9845ab24bd0f8d4d0e230918bac7144
|
[
"MIT"
] | 4
|
2021-11-23T15:15:07.000Z
|
2022-01-31T14:37:16.000Z
|
torchknickknacks/modelutils.py
|
AlGoulas/torchknickknacks
|
d7c70b7af9845ab24bd0f8d4d0e230918bac7144
|
[
"MIT"
] | null | null | null |
torchknickknacks/modelutils.py
|
AlGoulas/torchknickknacks
|
d7c70b7af9845ab24bd0f8d4d0e230918bac7144
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import OrderedDict
from functools import partial
from pathlib import Path
import pickle
import torch
def get_model_params(model, params_to_get = None, detach = True):
'''Extracts the parameters, names, and 'requires gradient' status from a
model
Input
-----
model: class instance based on the base class torch.nn.Module
params_to_get: list of str, default=None, specifying the names of the
parameters to be extracted
If None, then all parameters and names of parameters from the model
will be extracted
detach: bool, default True, detach the tensor from the computational graph
Output
------
params_name: list, containing one str for each extracted parameter
params_values: list, containing one tensor corresponding to each
parameter.
NOTE: The tensor is detached from the computation graph
req_grad: list, containing one Boolean variable for each parameter
denoting the requires_grad status of the tensor/parameter
of the model
'''
params_names = []
params_values = []
req_grad = []
for name, param in zip(model.named_parameters(), model.parameters()):
if params_to_get is not None:
if name[0] in params_to_get:
params_names.append(name[0])
if detach is True:
params_values.append(param.detach().clone())
elif detach is False:
params_values.append(param.clone())
req_grad.append(param.requires_grad)
else:
params_names.append(name[0])
if detach is True:
params_values.append(param.detach().clone())
elif detach is False:
params_values.append(param.clone())
req_grad.append(param.requires_grad)
return params_values, params_names, req_grad
def freeze_params(model,
params_to_freeze = None,
freeze = True):
'''Freeze or unfreeze the parameters of a model
Input
-----
model: class instance based on the base class torch.nn.Module
params_to_freeze: list of str specifying the names of the params to be
frozen or unfrozen
freeze: bool, default True, specifying whether to freeze (True) or
unfreeze (False) the model params
Output
------
model: class instance based on the base class torch.nn.Module with changed
requires_grad param for the named params in params_to_freeze
(freeze = requires_grad is False unfreeze = requires_grad is True)
'''
for name, param in zip(model.named_parameters(), model.parameters()):
if params_to_freeze is not None:
if name[0] in params_to_freeze:
param.requires_grad = True if freeze is False else False
else:
param.requires_grad = True if freeze is False else False
def delete_layers(model, del_ids = []):
'''Delete layers from model
Input
-----
model: model to be modified
del_ids: list of int, default [], the modules/layers
that will be deleted
NOTE: 0, 1... denotes the 1st, 2nd etc layer
Output
------
model: model with deleted modules/layers that is an instance of
torch.nn.modules.container.Sequential
'''
children = [c for i,c in enumerate(model.named_children()) if i not in del_ids]
model = torch.nn.Sequential(
OrderedDict(children)
)
return model
def add_layers(model, modules = []):
'''Add layers/modules to torch.nn.modules.container.Sequential
Input
-----
model: instance of class of base class torch.nn.Module
modules: list of dict
each dict has key:value pairs
{
'name': str
'position': int
'module': torch.nn.Module
}
with:
name: str, name to be added in the nn.modules.container.Sequential
position: int, [0,..N], with N>0, also -1, where N the total
nr of modules in the torch.nn.modules.container.Sequential
-1 denotes the module that will be appended at the end
module: torch.nn.Module
Output
------
model: model with added modules/layers that is an instance of
torch.nn.modules.container.Sequential
'''
all_positions = [m['position'] for m in modules]
current_children = [c for c in model.named_children()]
children = []
children_idx = 0
iterations = len(current_children) + len(all_positions)
if -1 in all_positions: iterations -= 1
for i in range(iterations):
if i not in all_positions:
children.append(current_children[children_idx])
children_idx += 1
else:
idx = all_positions.index(i)
d = modules[idx]
children.append((d['name'], d['module']))
if -1 in all_positions:
idx = all_positions.index(-1)
d = modules[idx]
children.append((d['name'], d['module']))
model = torch.nn.Sequential(
OrderedDict(children)
)
return model
class Recorder():
'''Get input, output or parameters to a module/layer
by registering forward or backward hooks
Input
-----
module: a module of a class in torch.nn.modules
record_input: bool, default False, deciding if input to module will be
recorded
record_output: bool, default False, deciding if output to module will be
recorded
record_params: bool, default False, deciding if params of module will be
recorded
params_to_get: list of str, default None, specifying the parameters to be
recorded from the module (if None all parameters are recorded)
NOTE: meaningful only if record_params
backward: bool, default False, deciding if a forward or backward hook
will be registered and the recording will be performed accordingly
custom_fn: function, default None, to be executed in the forward or backward
pass.
It must have the following signature:
custom_fn(module, input, output, **kwargs)
with kwargs optional
The signature follows the signature of functions to be registered
in hooks. See for more details:
https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html
save_to: str, default None, specifying a path to a folder for all recordings
to be saved.
NOTE: recordings are saved with filename: recording_0, recording_1, recording_N
**kwargs: if keyword args are specified they will be passed on to the
custom_fn
The attribute recording contains the output, input or params of a module
'''
def __init__(self,
module,
record_input = False,
record_output = False,
record_params = False,
params_to_get = None,
backward = False,
custom_fn = None,
save_to = None,
**kwargs):
self.params_to_get = params_to_get
self.kwargs = kwargs if kwargs else None
if save_to:
self.counter = 0  # if a path is specified, keep a counter
self.save_to = save_to
if record_input is True:
fn = partial(self._fn_in_out_params, record_what = 'input')
elif record_output is True:
fn = partial(self._fn_in_out_params, record_what = 'output')
elif record_params is True:
fn = partial(self._fn_in_out_params, record_what = 'params')
if custom_fn is not None:
fn = self._custom_wrapper
self.custom_fn = custom_fn
if backward is False:
self.hook = module.register_forward_hook(fn)
elif backward is True:
self.hook = module.register_full_backward_hook(fn)
def _fn_in_out_params(self, module, input, output, record_what = None):
att = getattr(self, 'save_to', None)
if att is None:
if record_what == 'input':
self.recording = input
elif record_what == 'output':
self.recording = output
elif record_what == 'params':
params = get_model_params(module, params_to_get = self.params_to_get)[0]
self.recording = params
else:
name = 'recording_' + str(self.counter)
filename = Path(self.save_to) / name
self.counter += 1
with open(filename, 'wb') as handle:
if record_what == 'input':
pickle.dump(input, handle, protocol = pickle.HIGHEST_PROTOCOL)
elif record_what == 'output':
pickle.dump(output, handle, protocol = pickle.HIGHEST_PROTOCOL)
elif record_what == 'params':
params = get_model_params(module, params_to_get = self.params_to_get)[0]
pickle.dump(params, handle, protocol = pickle.HIGHEST_PROTOCOL)
def _custom_wrapper(self, module, input, output):
if self.kwargs:
res = self.custom_fn(module, input, output, **self.kwargs)
else:
res = self.custom_fn(module, input, output)
att = getattr(self, 'save_to', None)
if res and att is None:
self.recording = res
elif res and att:
name = 'recording_' + str(self.counter)
filename = Path(self.save_to) / name
self.counter += 1
with open(filename, 'wb') as handle:
pickle.dump(res, handle, protocol = pickle.HIGHEST_PROTOCOL)
def close(self):
self.hook.remove()
att = getattr(self, 'counter', None)
if att: self.counter = 0
def get_all_layers(model):
'''
Get all the children (layers) from a model, even the ones that are nested
Input
-----
model: class instance based on the base class torch.nn.Module
Output
------
all_layers: list of all layers of the model
Adapted from:
https://stackoverflow.com/questions/54846905/pytorch-get-all-layers-of-model
'''
children = list(model.children())
all_layers = []
if not children:#if model has no children model is last child
return model
else:
# Look for children from children to the last child
for child in children:
try:
all_layers.extend(get_all_layers(child))
except TypeError:
all_layers.append(get_all_layers(child))
return all_layers
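# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# A minimal sketch combining the helpers above: record the output of a layer with
# Recorder, then freeze that layer; the toy model and tensor shapes are assumptions.
if __name__ == "__main__":
    model = torch.nn.Sequential(OrderedDict([
        ("fc1", torch.nn.Linear(8, 4)),
        ("relu", torch.nn.ReLU()),
        ("fc2", torch.nn.Linear(4, 2)),
    ]))
    recorder = Recorder(model.fc2, record_output=True)
    _ = model(torch.randn(1, 8))
    print(recorder.recording.shape)  # torch.Size([1, 2])
    recorder.close()
    freeze_params(model, params_to_freeze=["fc1.weight", "fc1.bias"])
    print(get_model_params(model, params_to_get=["fc1.weight"])[2])  # [False]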
| 35.613419
| 107
| 0.592805
|
e67522cbe4a624d8b939ea0fcff67b2bce6b424a
| 100
|
py
|
Python
|
run4it/api/user/__init__.py
|
andraune/Run4IT_BackEnd
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | 1
|
2022-03-29T06:11:20.000Z
|
2022-03-29T06:11:20.000Z
|
run4it/api/user/__init__.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
run4it/api/user/__init__.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
# add this to force db migrate to detect models
from .model import User, UserConfirmation # noqa
| 33.333333
| 50
| 0.76
|
1ab20f8f07f603c9b01d3f21024cb55b03532396
| 4,776
|
py
|
Python
|
lib/game/bot.py
|
Pavivenkatesan/kivy-tictactoe
|
bca0a9753017402ddd49a1c3552f1a97a820a653
|
[
"MIT"
] | null | null | null |
lib/game/bot.py
|
Pavivenkatesan/kivy-tictactoe
|
bca0a9753017402ddd49a1c3552f1a97a820a653
|
[
"MIT"
] | null | null | null |
lib/game/bot.py
|
Pavivenkatesan/kivy-tictactoe
|
bca0a9753017402ddd49a1c3552f1a97a820a653
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
from lib.game.player import Player
from itertools import product
class Bot(Player):
state_space = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
# Initializing state values
each_player = ['X', 'O', ' ']
states_dictionary = {}
# listing all possible states
states = [[list(i[0:3]), list(i[3:6]), list(i[6:10])] for i in product(each_player, repeat=9)]
Total_moves = 9
print("Total number of actions = ", Total_moves)
# getting Total number of states
Total_states = len(states)
# Intializing agent intial value as 0
sv_O = np.full(Total_states, 0.0)
# Training file
sv_O = np.loadtxt('lib/game/trained_O.txt', dtype=np.float64)
# Determining the current state value and whether the game has been won
def cur_state(state_space):
if (state_space[0][0] == state_space[0][1] and state_space[0][1] == state_space[0][2] and state_space[0][
0] is not ' '):
return state_space[0][0], "Done"
if (state_space[1][0] == state_space[1][1] and state_space[1][1] == state_space[1][2] and state_space[1][
0] is not ' '):
return state_space[1][0], "Done"
if (state_space[2][0] == state_space[2][1] and state_space[2][1] == state_space[2][2] and state_space[2][
0] is not ' '):
return state_space[2][0], "Done"
if (state_space[0][0] == state_space[1][0] and state_space[1][0] == state_space[2][0] and state_space[0][
0] is not ' '):
return state_space[0][0], "Done"
if (state_space[0][1] == state_space[1][1] and state_space[1][1] == state_space[2][1] and state_space[0][
1] is not ' '):
return state_space[0][1], "Done"
if (state_space[0][2] == state_space[1][2] and state_space[1][2] == state_space[2][2] and state_space[0][
2] is not ' '):
return state_space[0][2], "Done"
if (state_space[0][0] == state_space[1][1] and state_space[1][1] == state_space[2][2] and state_space[0][
0] is not ' '):
return state_space[1][1], "Done"
if (state_space[2][0] == state_space[1][1] and state_space[1][1] == state_space[0][2] and state_space[2][
0] is not ' '):
return state_space[1][1], "Done"
# if none of the above is true there must be a draw
draw = 0
for i in range(3):
for j in range(3):
if state_space[i][j] is ' ':
draw = 1
if draw is 0:
return None, "Draw"
return None, "Not Done"
# Defining the state values for agent O
for i in range(Total_states):
states_dictionary[i] = states[i]
won_by, _ = cur_state(states_dictionary[i])
if won_by == 'X':
sv_O[i] = -1
elif won_by == 'O':
sv_O[i] = 1
def check_draw(self, sv):
draw = 0
for i in range(3):
for j in range(3):
if sv[i][j] is ' ':
draw = 1
if draw is 0:
return 0, "Draw"
def play(self, sv, each_player, cell):
if sv[int((cell - 1) / 3)][(cell - 1) % 3] is ' ':
sv[int((cell - 1) / 3)][(cell - 1) % 3] = each_player
else:
cell = int(input(" Choose again, Cell is not empty: "))
self.play(sv, each_player, cell)
# Defining new state function, which traverses over rows and columns and returns a new state
def new(self, state):
ns = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
for i in range(3):
for j in range(3):
ns[i][j] = state[i][j]
return ns
def get_Predicted_Values(self, sv):
if(self.check_draw(sv)):
return 0
print("The current state :" + str(sv))
actions = []
curr_state_values = []
empty_cells = []
csv = list(self.states_dictionary.keys())[list(self.states_dictionary.values()).index(self.state_space)]
for i in range(3):
for j in range(3):
if sv[i][j] is ' ':
empty_cells.append(i * 3 + (j + 1))
for empty_cell in empty_cells:
actions.append(empty_cell)
new_state = self.new(sv)
self.play(new_state, "O", empty_cell)
next_sid = list(self.states_dictionary.keys())[list(self.states_dictionary.values()).index(new_state)]
curr_state_values.append(self.sv_O[next_sid])
print('Possible Action moves = ' + str(actions))
print('Action Move values = ' + str(curr_state_values))
best_move_id = np.argmax(curr_state_values)
best_move = actions[best_move_id]
return best_move
| 37.606299
| 114
| 0.54062
|
b6373fd0b2398b1af4945c8a787113cc4ab4cd35
| 9,175
|
py
|
Python
|
cartography/intel/aws/ec2/tgw.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 2,322
|
2019-03-02T01:07:20.000Z
|
2022-03-31T20:39:12.000Z
|
cartography/intel/aws/ec2/tgw.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 462
|
2019-03-07T18:38:11.000Z
|
2022-03-31T14:55:20.000Z
|
cartography/intel/aws/ec2/tgw.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 246
|
2019-03-03T02:39:23.000Z
|
2022-02-24T09:46:38.000Z
|
import logging
from typing import Dict
from typing import List
import boto3
import botocore.exceptions
import neo4j
from .util import get_botocore_config
from cartography.util import aws_handle_regions
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
@aws_handle_regions
def get_transit_gateways(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
client = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
data: List[Dict] = []
try:
data = client.describe_transit_gateways()["TransitGateways"]
except botocore.exceptions.ClientError as e:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html#parsing-error-responses-and-catching-exceptions-from-aws-services
logger.warning(
"Could not retrieve Transit Gateways due to boto3 error %s: %s. Skipping.",
e.response['Error']['Code'],
e.response['Error']['Message'],
)
return data
@timeit
@aws_handle_regions
def get_tgw_attachments(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
client = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
tgw_attachments: List[Dict] = []
try:
paginator = client.get_paginator('describe_transit_gateway_attachments')
for page in paginator.paginate():
tgw_attachments.extend(page['TransitGatewayAttachments'])
except botocore.exceptions.ClientError as e:
logger.warning(
"Could not retrieve Transit Gateway Attachments due to boto3 error %s: %s. Skipping.",
e.response['Error']['Code'],
e.response['Error']['Message'],
)
return tgw_attachments
@timeit
@aws_handle_regions
def get_tgw_vpc_attachments(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
client = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
tgw_vpc_attachments: List[Dict] = []
try:
paginator = client.get_paginator('describe_transit_gateway_vpc_attachments')
for page in paginator.paginate():
tgw_vpc_attachments.extend(page['TransitGatewayVpcAttachments'])
except botocore.exceptions.ClientError as e:
logger.warning(
"Could not retrieve Transit Gateway VPC Attachments due to boto3 error %s: %s. Skipping.",
e.response['Error']['Code'],
e.response['Error']['Message'],
)
return tgw_vpc_attachments
@timeit
def load_transit_gateways(
neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str,
update_tag: int,
) -> None:
ingest_transit_gateway = """
MERGE (ownerAccount:AWSAccount {id: {OwnerId}})
ON CREATE SET ownerAccount.firstseen = timestamp(), ownerAccount.foreign = true
SET ownerAccount.lastupdated = {update_tag}
MERGE (tgw:AWSTransitGateway {id: {ARN}})
ON CREATE SET tgw.firstseen = timestamp(), tgw.arn = {ARN}
SET tgw.tgw_id = {TgwId},
tgw.ownerid = {OwnerId},
tgw.state = {State},
tgw.description = {Description},
tgw.region = {Region},
tgw.lastupdated = {update_tag}
WITH tgw
MERGE (ownerAccount)-[r:RESOURCE]->(tgw)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
for tgw in data:
tgw_id = tgw["TransitGatewayId"]
neo4j_session.run(
ingest_transit_gateway,
TgwId=tgw_id,
ARN=tgw["TransitGatewayArn"],
Description=tgw.get("Description"),
Region=region,
AWS_ACCOUNT_ID=current_aws_account_id,
OwnerId=tgw["OwnerId"],
State=tgw["State"],
update_tag=update_tag,
)
_attach_shared_transit_gateway(
neo4j_session, tgw, region, current_aws_account_id, update_tag,
)
@timeit
def _attach_shared_transit_gateway(
neo4j_session: neo4j.Session, tgw: Dict, region: str, current_aws_account_id: str, update_tag: int,
) -> None:
attach_tgw = """
MERGE (tgw:AWSTransitGateway {id: {ARN}})
ON CREATE SET tgw.firstseen = timestamp()
SET tgw.lastupdated = {update_tag}
WITH tgw
MATCH (currentAccount:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (tgw)-[s:SHARED_WITH]->(currentAccount)
ON CREATE SET s.firstseen = timestamp()
SET s.lastupdated = {update_tag}
"""
if tgw["OwnerId"] != current_aws_account_id:
neo4j_session.run(
attach_tgw,
ARN=tgw["TransitGatewayArn"],
TransitGatewayId=tgw["TransitGatewayId"],
AWS_ACCOUNT_ID=current_aws_account_id,
update_tag=update_tag,
)
@timeit
def load_tgw_attachments(
neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str,
update_tag: int,
) -> None:
ingest_transit_gateway = """
MERGE (tgwa:AWSTransitGatewayAttachment{id: {TgwAttachmentId}})
ON CREATE SET tgwa.firstseen = timestamp()
SET tgwa.region = {Region},
tgwa.resource_type = {ResourceType},
tgwa.state = {State},
tgwa.lastupdated = {update_tag}
WITH tgwa
MATCH (awsAccount:AWSAccount {id: {AWS_ACCOUNT_ID}})
MERGE (awsAccount)-[r:RESOURCE]->(tgwa)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
WITH tgwa
MATCH (tgw:AWSTransitGateway {tgw_id: {TransitGatewayId}})
MERGE (tgwa)-[attach:ATTACHED_TO]->(tgw)
ON CREATE SET attach.firstseen = timestamp()
SET attach.lastupdated = {update_tag}
"""
for tgwa in data:
tgwa_id = tgwa["TransitGatewayAttachmentId"]
neo4j_session.run(
ingest_transit_gateway,
TgwAttachmentId=tgwa_id,
TransitGatewayId=tgwa["TransitGatewayId"],
ResourceId=tgwa.get("ResourceId"),
Region=region,
AWS_ACCOUNT_ID=current_aws_account_id,
ResourceType=tgwa.get("ResourceType"),
State=tgwa["State"],
update_tag=update_tag,
)
if tgwa.get("VpcId"): # only attach if the TGW attachment is a VPC TGW attachment
_attach_tgw_vpc_attachment_to_vpc_subnets(
neo4j_session, tgwa, region, current_aws_account_id, update_tag,
)
@timeit
def _attach_tgw_vpc_attachment_to_vpc_subnets(
neo4j_session: neo4j.Session, tgw_vpc_attachment: Dict, region: str,
current_aws_account_id: str, update_tag: int,
) -> None:
"""
Attach a VPC Transit Gateway Attachment to the VPC and its subnets
"""
attach_vpc_tgw_attachment_to_vpc = """
MERGE (vpc:AWSVpc {id: {VpcId}})
ON CREATE SET vpc.firstseen = timestamp()
SET vpc.lastupdated = {update_tag}
WITH vpc
MATCH (tgwa:AWSTransitGatewayAttachment {id: {TgwAttachmentId}})
MERGE (vpc)-[r:RESOURCE]->(tgwa)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
attach_vpc_tgw_attachment_to_subnet = """
MERGE (sub:EC2Subnet {subnetid: {SubnetId}})
ON CREATE SET sub.firstseen = timestamp()
SET sub.lastupdated = {update_tag}
WITH sub
MATCH (tgwa:AWSTransitGatewayAttachment {id: {TgwAttachmentId}})
MERGE (tgwa)-[p:PART_OF_SUBNET]->(sub)
ON CREATE SET p.firstseen = timestamp()
SET p.lastupdated = {update_tag}
"""
neo4j_session.run(
attach_vpc_tgw_attachment_to_vpc,
VpcId=tgw_vpc_attachment["VpcId"],
TgwAttachmentId=tgw_vpc_attachment["TransitGatewayAttachmentId"],
update_tag=update_tag,
)
for subnet_id in tgw_vpc_attachment["SubnetIds"]:
neo4j_session.run(
attach_vpc_tgw_attachment_to_subnet,
SubnetId=subnet_id,
TgwAttachmentId=tgw_vpc_attachment["TransitGatewayAttachmentId"],
update_tag=update_tag,
)
@timeit
def cleanup_transit_gateways(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
run_cleanup_job('aws_import_tgw_cleanup.json', neo4j_session, common_job_parameters)
def sync_transit_gateways(
neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str], current_aws_account_id: str,
update_tag: int, common_job_parameters: Dict,
) -> None:
for region in regions:
logger.info("Syncing AWS Transit Gateways for region '%s' in account '%s'.", region, current_aws_account_id)
tgws = get_transit_gateways(boto3_session, region)
load_transit_gateways(neo4j_session, tgws, region, current_aws_account_id, update_tag)
logger.debug(
"Syncing AWS Transit Gateway Attachments for region '%s' in account '%s'.",
region, current_aws_account_id,
)
tgw_attachments = get_tgw_attachments(boto3_session, region)
tgw_vpc_attachments = get_tgw_vpc_attachments(boto3_session, region)
load_tgw_attachments(
neo4j_session, tgw_attachments + tgw_vpc_attachments,
region, current_aws_account_id, update_tag,
)
cleanup_transit_gateways(neo4j_session, common_job_parameters)
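# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# Drives the sync entry point above directly; the Neo4j URI, credentials, region,
# account id, update tag and job-parameter keys are placeholder assumptions, and
# valid AWS credentials plus a running Neo4j instance are required.
if __name__ == "__main__":
    driver = neo4j.GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
    with driver.session() as session:
        sync_transit_gateways(
            neo4j_session=session,
            boto3_session=boto3.session.Session(region_name="us-east-1"),
            regions=["us-east-1"],
            current_aws_account_id="123456789012",
            update_tag=1234567890,
            common_job_parameters={"UPDATE_TAG": 1234567890, "AWS_ID": "123456789012"},
        )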
| 35.288462
| 157
| 0.680981
|
e33ca7fc699ca6cc1f3e064db2ddb7b2fe8db069
| 2,119
|
py
|
Python
|
purdue/models/dining.py
|
ScriptedButton/BoilerBot
|
80419d075cc51f5d90652070b851334bd9382eaa
|
[
"Apache-2.0"
] | 2
|
2021-08-05T04:25:45.000Z
|
2021-08-05T05:55:24.000Z
|
purdue/models/dining.py
|
ScriptedButton/BoilerBot
|
80419d075cc51f5d90652070b851334bd9382eaa
|
[
"Apache-2.0"
] | null | null | null |
purdue/models/dining.py
|
ScriptedButton/BoilerBot
|
80419d075cc51f5d90652070b851334bd9382eaa
|
[
"Apache-2.0"
] | null | null | null |
import aiohttp
class Dining:
def __init__(self):
import purdue.models
self._LOCATION_URL = "https://api.hfs.purdue.edu/menus/v2/locations"
self._locations: list[purdue.models.Location] = list()
def get_location(self, name):
for location in self.locations:
if location.name == name:
return location
async def load(self):
import purdue.models
async with aiohttp.ClientSession() as session:
async with session.get(self._LOCATION_URL) as response:
response_json = await response.json()
for location in response_json.get("Location"):
location_obj = purdue.models.Location(
id=location.get("LocationId"),
name=location.get("Name"),
formal_name=location.get("FormalName"),
phone_number=location.get("PhoneNumber"),
latitude=location.get("Latitude"),
longitude=location.get("Longitude"),
short_name=location.get("ShortName"),
url=location.get("Url"),
google_place_id=location.get("GooglePlaceId"),
type=location.get("Type"),
transact_mobile_order_id=location.get("TransactMobileOrderId"),
address=purdue.models.Address(
street=location.get("Address").get("Street"),
city=location.get("Address").get("City"),
state=location.get("Address").get("State"),
zip_code=location.get("Address").get("ZipCode"),
country=location.get("Address").get("Country"),
country_code=location.get("Address").get("CountryCode")
),
meals=None
)
self.locations.append(location_obj)
@property
def locations(self):
return self._locations
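# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# Shows the intended async call pattern; it assumes the surrounding purdue.models
# package (Location, Address) is importable and the Purdue HFS API is reachable.
if __name__ == "__main__":
    import asyncio

    async def main():
        dining = Dining()
        await dining.load()
        print([location.name for location in dining.locations])

    asyncio.run(main())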
| 45.085106
| 87
| 0.515809
|
9de4c90e5df54335c98bdebe28bfc1ceb315924c
| 5,296
|
py
|
Python
|
app/auth/authorisation.py
|
dudikbender/geocoder-api
|
3879f7729fcc7658f6ae75e65e187af4667cc393
|
[
"MIT"
] | null | null | null |
app/auth/authorisation.py
|
dudikbender/geocoder-api
|
3879f7729fcc7658f6ae75e65e187af4667cc393
|
[
"MIT"
] | null | null | null |
app/auth/authorisation.py
|
dudikbender/geocoder-api
|
3879f7729fcc7658f6ae75e65e187af4667cc393
|
[
"MIT"
] | null | null | null |
from fastapi import Depends, APIRouter, HTTPException, Security, status
from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer, HTTPBasic, HTTPBasicCredentials
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
import os
from dotenv import find_dotenv, load_dotenv
import pandas as pd
from pandas import json_normalize
import json
import requests
from passlib.context import CryptContext
from datetime import datetime, timedelta
from typing import Optional
from jose import JWTError, jwt
from app.utils.schema import Token, TokenData, User, UserInDB
env_loc = find_dotenv('.env')
load_dotenv(env_loc)
api_key = os.environ.get('AIRTABLE_API_KEY')
app_auth = APIRouter()
# Define the auth scheme and access token URL
# token_schema = HTTPBearer(scheme_name='Authorization Bearer Token') For API TOKEN option
auth_security = HTTPBasic()
#oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth/token")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Settings for encryption
SECRET_KEY = os.environ.get('SECRET_KEY')
ALGORITHM = os.environ.get('ALGORITHM')
ACCESS_TOKEN_EXPIRE_MINUTES = int(os.environ.get('ACCESS_TOKEN_EXPIRE_MINUTES'))
def create_password_hash(password):
return pwd_context.hash(password)
def verify_password(plain_password, password_hash):
return pwd_context.verify(plain_password, password_hash)
def get_all_users():
url = f'https://api.airtable.com/v0/appe7KAdke2PXikpW/Users'
headers = {'Authorization': f'Bearer {api_key}'}
response = requests.get(url, headers=headers).json()
user_list = []
for record in response['records']:
fields = record['fields']
fields['id'] = record['id']
user_list.append(fields)
df = json_normalize(user_list)
return df
def get_user(username: str):
user_df = get_all_users()
user = user_df[user_df.username == username]
if len(user) == 0:
print('Username either does not exist or is misspelled.')
return False
else:
user_data = json.loads(user.to_json(orient='records'))[0]
return UserInDB(**user_data)
def authenticate_user(username, password):
# First, retrieve the user by the username provided
user = get_user(username)
if not user:
return False
# If present, verify password against password hash in database
password_hash = user.hashed_password
if not verify_password(password, password_hash):
return False
return user
# Deprecated token authorization flow - keeping for reference
'''@app_auth.post('/')
async def login(token: HTTPAuthorizationCredentials = Security(api_key_schema)):
if token.dict()['credentials'] != app_key:
raise HTTPException(status_code=401, detail='Api key is not correct.')
else:
return token.dict()['credentials']'''
# Create access token, required for OAuth2 flow
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
# OAuth2 Scheme - Decrypt the token and retrieve the username from payload
'''async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = get_user(username=token_data.username)
if user is None:
raise credentials_exception
return user'''
# HTTP Basic scheme
async def get_current_user(credentials: str = Depends(auth_security)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
username = credentials.username
password = credentials.password
user = authenticate_user(username, password)
if username is None:
raise credentials_exception
except:
raise credentials_exception
return user
# Endpoint for token authorisation
@app_auth.post("/token")
async def login_for_access_token(form_data: HTTPBasicCredentials = Depends(auth_security)):
user = authenticate_user(form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
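# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# Mounts the router above on an application and protects a demo endpoint with
# get_current_user; the prefix and endpoint are assumptions, and the environment
# variables read at import time (AIRTABLE_API_KEY, SECRET_KEY, ALGORITHM,
# ACCESS_TOKEN_EXPIRE_MINUTES) must be set for this module to import at all.
if __name__ == "__main__":
    from fastapi import FastAPI

    demo_app = FastAPI()
    demo_app.include_router(app_auth, prefix="/auth")

    @demo_app.get("/me")
    async def read_current_user(user: UserInDB = Depends(get_current_user)):
        return {"username": user.username}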
| 37.560284
| 107
| 0.723943
|
dfd4401bce065b834531e06ceb2b277c34c88178
| 983
|
py
|
Python
|
Algorithms_easy/1271. Hexspeak.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | 4
|
2020-08-11T20:45:15.000Z
|
2021-03-12T00:33:34.000Z
|
Algorithms_easy/1271. Hexspeak.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
Algorithms_easy/1271. Hexspeak.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
"""
1271. Hexspeak
Easy
A decimal number can be converted to its Hexspeak representation by first converting it to an uppercase hexadecimal string, then replacing all occurrences of the digit 0 with the letter O, and the digit 1 with the letter I. Such a representation is valid if and only if it consists only of the letters in the set {"A", "B", "C", "D", "E", "F", "I", "O"}.
Given a string num representing a decimal integer N, return the Hexspeak representation of N if it is valid, otherwise return "ERROR".
Example 1:
Input: num = "257"
Output: "IOI"
Explanation: 257 is 101 in hexadecimal.
Example 2:
Input: num = "3"
Output: "ERROR"
Constraints:
1 <= N <= 10^12
There are no leading zeros in the given string.
All answers must be in uppercase letters.
"""
class Solution:
def toHexspeak(self, num: str) -> str:
s = hex(int(num)).upper()[2:].replace('0', 'O').replace('1', 'I')
return 'ERROR' if any(c.isdigit() for c in s) else s
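# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# Quick check of the solution against the two examples in the problem statement.
if __name__ == "__main__":
    solver = Solution()
    assert solver.toHexspeak("257") == "IOI"
    assert solver.toHexspeak("3") == "ERROR"
    print("all examples pass")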
| 28.911765
| 355
| 0.682604
|
79521fcbfb967e650cb7e2059d63cc45c8045a9e
| 1,202
|
py
|
Python
|
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
config.py
|
marcdjulien/govindex
|
18740206e54aecfb4193e910e5076ee504229779
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import os
from decouple import config
class Config(object):
basedir = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = config('SECRET_KEY', default='sdnfogui3wn')
SQLALCHEMY_DATABASE_URI = config('SQLALCHEMY_DATABASE_URI', default='sqlite:///test-3.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
# Security
SESSION_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_DURATION = 3600
# PostgreSQL database
SQLALCHEMY_DATABASE_URI = '{}://{}:{}@{}:{}/{}'.format(
config('DB_ENGINE' , default='postgresql' ),
config('DB_USERNAME' , default='appseed' ),
config('DB_PASS' , default='pass' ),
config('DB_HOST' , default='localhost' ),
config('DB_PORT' , default=5432 ),
config('DB_NAME' , default='appseed-flask' )
)
class DebugConfig(Config):
DEBUG = True
SQLALCHEMY_ECHO = True
# Load all possible configurations
config_dict = {
'Production': ProductionConfig,
'Debug' : DebugConfig
}
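# --- Editor's note: hypothetical usage sketch, not part of the original module. ---
# Looks up a configuration class from config_dict; driving the mode from a DEBUG
# environment variable is an assumption about how the surrounding app uses this.
if __name__ == "__main__":
    mode = 'Debug' if config('DEBUG', default=True, cast=bool) else 'Production'
    print(mode, config_dict[mode].SQLALCHEMY_TRACK_MODIFICATIONS)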
| 28.619048
| 94
| 0.633943
|
16c07bd40b0480b995a6c42232cd9d87a3c15660
| 442
|
py
|
Python
|
explorer/notes/migrations/0013_auto_20191126_1408.py
|
UPstartDeveloper/explorer_buddy
|
467fa77307a588645e7a9fd269ae13b6b24d4efc
|
[
"MIT"
] | null | null | null |
explorer/notes/migrations/0013_auto_20191126_1408.py
|
UPstartDeveloper/explorer_buddy
|
467fa77307a588645e7a9fd269ae13b6b24d4efc
|
[
"MIT"
] | 22
|
2019-12-05T01:10:16.000Z
|
2022-03-12T00:06:51.000Z
|
explorer/notes/migrations/0013_auto_20191126_1408.py
|
UPstartDeveloper/explorer_buddy
|
467fa77307a588645e7a9fd269ae13b6b24d4efc
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-11-26 22:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0012_auto_20191126_1142'),
]
operations = [
migrations.AlterField(
model_name='note',
name='media',
field=models.FileField(blank=True, help_text='Optional image to add to note.', upload_to='image/'),
),
]
| 23.263158
| 111
| 0.61086
|
5971161ca781f643c9d23acb275b3795886ab8a3
| 8,952
|
py
|
Python
|
reaction_completer/completer.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
reaction_completer/completer.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
reaction_completer/completer.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
import logging
from functools import reduce
from operator import or_
import sympy
from sympy import Matrix, symbols
from reaction_completer.errors import (
StupidRecipe, TooManyPrecursors, TooFewPrecursors)
from reaction_completer.formatting import simplify_print
from reaction_completer.material import MaterialInformation
__author__ = 'Haoyan Huo'
__maintainer__ = 'Haoyan Huo'
__email__ = 'haoyan.huo@lbl.gov'
class ReactionCompleter(object):
def __init__(self, precursors: [MaterialInformation],
target: MaterialInformation,
target_min_nv=2):
"""
A reaction completer that takes a set of precursors and a target,
then calculates the possible reactions, using sympy for symbolic
derivation.
:param precursors: List of precursors.
:type precursors: list(MaterialInformation)
:param target: The target material.
:type target: MaterialInformation
:param target_min_nv:
"""
self.precursors = precursors
self.target = target
self.target_min_nv = target_min_nv
self._precursor_candidates = []
self._decomposition_chemicals = {}
self._exchange_chemicals = {}
self._linear_eq = {}
self._inspect_target()
self._prepare_precursors()
self._setup_linear_equation()
def _inspect_target(self):
"""
Prepare the target material into a ready-to-use structure.
"""
if len(self.target.nv_elements) < self.target_min_nv:
raise StupidRecipe(
'Target must have more than 1 non volatile elements, got %r: %s' %
(self.target.nv_elements, self.target.material_formula))
def _prepare_precursors(self):
# find the set of precursors
seen_precursors = set()
for precursor in self.precursors:
# Skip precursors that are seen
if precursor.material_formula in seen_precursors:
continue
seen_precursors.add(precursor.material_formula)
if precursor.all_elements_dict == self.target.all_elements_dict:
# TODO: we need a smarter comparison
raise StupidRecipe('Precursor list contains target')
if len(precursor.all_elements) == 0:
logging.debug(
'Skipping empty precursor %s: %s',
precursor.material_formula)
continue
excessive = precursor.nv_elements - self.target.nv_elements
if len(excessive) > 0:
logging.debug(
'Skipping precursor %s because it '
'has excessive chemical elements %r',
precursor.material_formula, excessive)
continue
self._precursor_candidates.append(precursor)
self._decomposition_chemicals.update(precursor.decompose_chemicals)
self._exchange_chemicals.update(self.target.exchange_chemicals)
if len(self._precursor_candidates) == 0:
raise StupidRecipe('Precursor candidates is empty')
# check for equality
precursors_nv_elements = reduce(
or_, [x.nv_elements for x in self._precursor_candidates])
missing_elements = self.target.nv_elements - precursors_nv_elements
if len(missing_elements) > 0:
raise StupidRecipe(
'Precursor candidates do not '
'provide non volatile elements: %r' % missing_elements)
def _setup_linear_equation(self):
all_elements = reduce(
or_,
[x.all_elements for x in self._precursor_candidates] +
[self.target.all_elements] +
[set(x) for x in self._exchange_chemicals.values()] +
[set(x) for x in self._decomposition_chemicals.values()])
all_elements = sorted(list(all_elements))
# Create the symbols that will be used for linear eq.
chemical_symbols = ''
for i in range(len(self._precursor_candidates)):
chemical_symbols += 'p%d, ' % i
for i in range(len(self._decomposition_chemicals)):
chemical_symbols += 'r%d, ' % i
for i in range(len(self._exchange_chemicals)):
chemical_symbols += 'e%d, ' % i
chemical_symbols += 't'
chemical_symbols = symbols(chemical_symbols)
coefficient_matrix = []
which_side = []
def fill_row(material_elements):
row = [material_elements.get(element, 0) for element in all_elements]
coefficient_matrix.append(row)
for precursor in self._precursor_candidates:
fill_row(precursor.all_elements_dict)
which_side.append('fl')
for chemical in sorted(self._decomposition_chemicals):
fill_row(self._decomposition_chemicals[chemical])
which_side.append('dr')
for chemical in sorted(self._exchange_chemicals):
fill_row(self._exchange_chemicals[chemical])
which_side.append('dl')
target_elements = self.target.all_elements_dict
target_vector = [target_elements.get(i, 0) for i in all_elements]
coefficient_matrix = Matrix(coefficient_matrix).T
target_vector = Matrix(target_vector)
self._linear_eq.update({
'chemical_symbols': chemical_symbols,
'coefficient_matrix': coefficient_matrix,
'target_vector': target_vector,
'which_side': which_side,
'all_elements': all_elements,
})
def _render_reaction(self, solution: tuple):
balanced = {
'left': {},
'right': {self.target.material_formula: '1'}
}
solution = list(solution)
which_side = self._linear_eq['which_side']
precursor_solutions = solution[:len(self._precursor_candidates)]
precursor_side = which_side[:len(self._precursor_candidates)]
del solution[:len(self._precursor_candidates)]
del which_side[:len(self._precursor_candidates)]
decomposition_solutions = solution[:len(self._decomposition_chemicals)]
decomposition_side = which_side[:len(self._decomposition_chemicals)]
del solution[:len(self._decomposition_chemicals)]
del which_side[:len(self._decomposition_chemicals)]
exchange_solutions = solution.copy()
exchange_side = which_side.copy()
def decide_side_value(s, val):
if s[0] == 'f':
if s[1] == 'l':
return 'left', val
elif s[1] == 'r':
return 'right', -val
elif s[0] == 'd':
if not isinstance(val, sympy.Float):
value_zero = val.evalf(
subs={x: 0.001 for x in val.free_symbols})
value_negative = float(value_zero) < 0
else:
value_negative = float(val) < 0
if s[1] == 'l':
return ('left', val) if not value_negative else ('right', -val)
elif s[1] == 'r':
return ('right', -val) if value_negative else ('left', val)
for precursor, amount, side in zip(
self._precursor_candidates, precursor_solutions, precursor_side):
material_formula = precursor.material_formula
side, value = decide_side_value(side, amount)
value_s = simplify_print(value)
if value_s != '0':
balanced[side][material_formula] = value_s
for chemical, amount, side in zip(
sorted(self._decomposition_chemicals), decomposition_solutions, decomposition_side):
side, value = decide_side_value(side, amount)
value_s = simplify_print(value)
if value_s != '0':
balanced[side][chemical] = value_s
for chemical, amount, side in zip(
sorted(self._exchange_chemicals), exchange_solutions, exchange_side):
side, value = decide_side_value(side, amount)
value_s = simplify_print(value)
if value_s != '0':
balanced[side][chemical] = value_s
return balanced
def compute_reactions(self):
try:
a = self._linear_eq['coefficient_matrix']
b = self._linear_eq['target_vector']
solution, params = a.gauss_jordan_solve(b)
if len(params) > 0:
raise TooManyPrecursors(
'Too many precursors to balance %r ==> %r' % (
[x.material_formula for x in self.precursors],
self.target.material_formula))
solution = solution.T[:1, :]
except ValueError:
raise TooFewPrecursors('Too few precursors to balance')
return self._render_reaction(solution)
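# --- Editor's illustration, not part of the original module. ---
# A minimal sketch of the balancing idea used in compute_reactions(): element-count
# columns for the precursors (and the released CO2) are solved against the target
# BaTiO3 with sympy's gauss_jordan_solve. MaterialInformation parsing is omitted
# and the example reaction is an assumption chosen for illustration only.
if __name__ == "__main__":
    # columns: BaCO3, TiO2, -CO2 (released); rows: Ba, Ti, C, O
    a = Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [1, 0, -1],
        [3, 2, -2],
    ])
    b = Matrix([1, 1, 0, 3])  # BaTiO3
    solution, params = a.gauss_jordan_solve(b)
    print(solution.T)  # Matrix([[1, 1, 1]]): BaCO3 + TiO2 -> BaTiO3 + CO2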
| 37.3
| 100
| 0.607797
|